def generate_exporter_script():
    """
    Generates exporter script for RHCS cluster.

    The script content is shipped base64-encoded in the OCS operator's
    PackageManifest under the
    ``external.features.ocs.openshift.io/export-script`` CSV annotation;
    it is decoded and written to a uniquely named temporary ``.py`` file.

    Returns:
        str: path to the exporter script

    """
    # generate exporter script through packagemanifest
    ocs_operator_name = defaults.OCS_OPERATOR_NAME
    operator_selector = get_selector_for_ocs_operator()
    package_manifest = PackageManifest(
        resource_name=ocs_operator_name,
        selector=operator_selector,
    )
    ocs_operator_data = package_manifest.get()
    # channels[0] is used as-is; presumably the first channel carries the
    # relevant CSV annotation -- TODO confirm against the packagemanifest layout
    encoded_script = ocs_operator_data["status"]["channels"][0]["currentCSVDesc"][
        "annotations"
    ]["external.features.ocs.openshift.io/export-script"]

    # decode the exporter script and write to file.
    # delete=False keeps the file on disk after the handle is closed so the
    # caller can use the returned path; writing through the
    # NamedTemporaryFile handle inside a `with` block avoids the previous
    # descriptor leak (the file used to be opened a second time with open()
    # while the original handle was never closed).
    external_script = decode(encoded_script)
    with tempfile.NamedTemporaryFile(
        mode="w+",
        prefix="external-cluster-details-exporter-",
        suffix=".py",
        delete=False,
    ) as script_file:
        script_file.write(external_script)
    logger.info(f"external cluster script is located at {script_file.name}")
    return script_file.name
def run_ocs_upgrade(operation=None, *operation_args, **operation_kwargs):
    """
    Run upgrade procedure of OCS cluster

    Orchestrates the full upgrade flow: version sanity check, external-mode
    secret preparation, disconnected-image mirroring, channel/subscription
    update (or IBM Cloud addon re-enable, or UI-driven upgrade), waiting for
    the new CSV, image verification, external secret refresh and final
    installation verification. The whole in-cluster phase runs under
    CephHealthMonitor.

    Args:
        operation: (function): Function to run
        operation_args: (iterable): Function's arguments
        operation_kwargs: (map): Function's keyword arguments

    """
    ceph_cluster = CephCluster()
    original_ocs_version = config.ENV_DATA.get("ocs_version")
    upgrade_in_current_source = config.UPGRADE.get("upgrade_in_current_source", False)
    upgrade_ocs = OCSUpgrade(
        namespace=config.ENV_DATA["cluster_namespace"],
        version_before_upgrade=original_ocs_version,
        ocs_registry_image=config.UPGRADE.get("upgrade_ocs_registry_image"),
        upgrade_in_current_source=upgrade_in_current_source,
    )
    upgrade_version = upgrade_ocs.get_upgrade_version()
    # Sanity check: target version must not be lower than the running one.
    assert (
        upgrade_ocs.get_parsed_versions()[1] >= upgrade_ocs.get_parsed_versions()[0]
    ), (
        f"Version you would like to upgrade to: {upgrade_version} "
        f"is not higher or equal to the version you currently running: "
        f"{upgrade_ocs.version_before_upgrade}"
    )
    # create external cluster object
    # NOTE: external_cluster is only bound when external_mode is set; later
    # uses are guarded by the same config flag.
    if config.DEPLOYMENT["external_mode"]:
        host, user, password = get_external_cluster_client()
        external_cluster = ExternalCluster(host, user, password)
    # For external cluster , create the secrets if upgraded version is 4.8
    if (
        config.DEPLOYMENT["external_mode"]
        and original_ocs_version == "4.7"
        and upgrade_version == "4.8"
    ):
        external_cluster.create_object_store_user()
        access_key = config.EXTERNAL_MODE.get("access_key_rgw-admin-ops-user", "")
        secret_key = config.EXTERNAL_MODE.get("secret_key_rgw-admin-ops-user", "")
        if not (access_key and secret_key):
            raise ExternalClusterRGWAdminOpsUserException(
                "Access and secret key for rgw-admin-ops-user not found"
            )
        cmd = (
            f'oc create secret generic --type="kubernetes.io/rook"'
            f' "rgw-admin-ops-user" --from-literal=accessKey={access_key} --from-literal=secretKey={secret_key}'
        )
        exec_cmd(cmd)

    # Record pre-upgrade CSV/images so post-upgrade verification can diff them.
    csv_name_pre_upgrade = upgrade_ocs.get_csv_name_pre_upgrade()
    pre_upgrade_images = upgrade_ocs.get_pre_upgrade_image(csv_name_pre_upgrade)
    upgrade_ocs.load_version_config_file(upgrade_version)
    if config.DEPLOYMENT.get("disconnected") and not config.DEPLOYMENT.get(
        "disconnected_env_skip_image_mirroring"
    ):
        # Disconnected environment: mirror images first and swap in the
        # mirrored registry image for the rest of the flow.
        upgrade_ocs.ocs_registry_image = prepare_disconnected_ocs_deployment(
            upgrade=True
        )
        log.info(f"Disconnected upgrade - new image: {upgrade_ocs.ocs_registry_image}")

    # Keep Ceph health monitored for the duration of the upgrade itself.
    with CephHealthMonitor(ceph_cluster):
        channel = upgrade_ocs.set_upgrade_channel()
        upgrade_ocs.set_upgrade_images()
        live_deployment = config.DEPLOYMENT["live_deployment"]
        disable_addon = config.DEPLOYMENT.get("ibmcloud_disable_addon")
        if (
            config.ENV_DATA["platform"] == constants.IBMCLOUD_PLATFORM
            and live_deployment
            and not disable_addon
        ):
            # IBM Cloud live deployment upgrades via the managed addon:
            # disable then re-enable it at the target version.
            clustername = config.ENV_DATA.get("cluster_name")
            cmd = f"ibmcloud ks cluster addon disable openshift-data-foundation --cluster {clustername} -f"
            run_ibmcloud_cmd(cmd)
            # Fixed 120s settle time after each addon operation -- presumably
            # no explicit wait API is available; TODO confirm.
            time.sleep(120)
            cmd = (
                f"ibmcloud ks cluster addon enable openshift-data-foundation --cluster {clustername} -f --version "
                f"{upgrade_version}.0 --param ocsUpgrade=true"
            )
            run_ibmcloud_cmd(cmd)
            time.sleep(120)
        else:
            # UI-driven upgrade is only supported for the exact
            # OCP 4.9 + OCS 4.8 -> 4.9 combination; otherwise fall back to CLI.
            ui_upgrade_supported = False
            if config.UPGRADE.get("ui_upgrade"):
                if (
                    version.get_semantic_ocp_version_from_config()
                    == version.VERSION_4_9
                    and original_ocs_version == "4.8"
                    and upgrade_version == "4.9"
                ):
                    ui_upgrade_supported = True
                else:
                    log.warning(
                        "UI upgrade combination is not supported. It will fallback to CLI upgrade"
                    )
            if ui_upgrade_supported:
                ocs_odf_upgrade_ui()
            else:
                if (
                    config.ENV_DATA["platform"] == constants.IBMCLOUD_PLATFORM
                ) and not (upgrade_in_current_source):
                    create_ocs_secret(config.ENV_DATA["cluster_namespace"])
                if upgrade_version != "4.9":
                    # In the case of upgrade to ODF 4.9, the ODF operator should upgrade
                    # OCS automatically.
                    upgrade_ocs.update_subscription(channel)
                if original_ocs_version == "4.8" and upgrade_version == "4.9":
                    # 4.8 -> 4.9 switches to the ODF operator: subscribe fresh.
                    deployment = Deployment()
                    deployment.subscribe_ocs()
                else:
                    # In the case upgrade is not from 4.8 to 4.9 and we have manual approval strategy
                    # we need to wait and approve install plan, otherwise it's approved in the
                    # subscribe_ocs method.
                    subscription_plan_approval = config.DEPLOYMENT.get(
                        "subscription_plan_approval"
                    )
                    if subscription_plan_approval == "Manual":
                        wait_for_install_plan_and_approve(
                            config.ENV_DATA["cluster_namespace"]
                        )
                if (
                    config.ENV_DATA["platform"] == constants.IBMCLOUD_PLATFORM
                ) and not (upgrade_in_current_source):
                    for attempt in range(2):
                        # We need to do it twice, because some of the SA are updated
                        # after the first load of OCS pod after upgrade. So we need to
                        # link updated SA again.
                        log.info(
                            f"Sleep 1 minute before attempt: {attempt + 1}/2 "
                            "of linking secret/SAs"
                        )
                        time.sleep(60)
                        link_all_sa_and_secret_and_delete_pods(
                            constants.OCS_SECRET, config.ENV_DATA["cluster_namespace"]
                        )
        # Optional caller-supplied operation executed mid-upgrade (e.g. a
        # disruption test); its return value is deliberately discarded.
        if operation:
            log.info(f"Calling test function: {operation}")
            _ = operation(*operation_args, **operation_kwargs)
            # Workaround for issue #2531
            time.sleep(30)
            # End of workaround

        # Poll until the new CSV reports the upgrade completed (~12 min cap).
        for sample in TimeoutSampler(
            timeout=725,
            sleep=5,
            func=upgrade_ocs.check_if_upgrade_completed,
            channel=channel,
            csv_name_pre_upgrade=csv_name_pre_upgrade,
        ):
            # NOTE(review): TimeoutSampler raises TimeoutException from the
            # `for` statement itself, not from inside this try body, so this
            # except clause looks unreachable -- verify against TimeoutSampler.
            try:
                if sample:
                    log.info("Upgrade success!")
                    break
            except TimeoutException:
                raise TimeoutException("No new CSV found after upgrade!")
        old_image = upgrade_ocs.get_images_post_upgrade(
            channel, pre_upgrade_images, upgrade_version
        )
    # NOTE(review): placed outside the CephHealthMonitor context here; the
    # collapsed source formatting makes the original indentation ambiguous --
    # confirm against upstream history.
    verify_image_versions(
        old_image,
        upgrade_ocs.get_parsed_versions()[1],
        upgrade_ocs.version_before_upgrade,
    )

    # update external secrets
    if config.DEPLOYMENT["external_mode"]:
        # From here on upgrade_version is rebound to a semantic-version object.
        upgrade_version = version.get_semantic_version(upgrade_version, True)
        if upgrade_version >= version.VERSION_4_10:
            external_cluster.update_permission_caps()
        else:
            external_cluster.update_permission_caps(EXTERNAL_CLUSTER_USER)
        external_cluster.get_external_cluster_details()

        # update the external cluster details in secrets
        log.info("updating external cluster secret")
        external_cluster_details = NamedTemporaryFile(
            mode="w+",
            prefix="external-cluster-details-",
            delete=False,
        )
        with open(external_cluster_details.name, "w") as fd:
            decoded_external_cluster_details = decode(
                config.EXTERNAL_MODE["external_cluster_details"]
            )
            fd.write(decoded_external_cluster_details)
        cmd = (
            f"oc set data secret/rook-ceph-external-cluster-details -n {constants.OPENSHIFT_STORAGE_NAMESPACE} "
            f"--from-file=external_cluster_details={external_cluster_details.name}"
        )
        exec_cmd(cmd)

    # Final post-upgrade verification (MCG-only clusters get a lighter check).
    if config.ENV_DATA.get("mcg_only_deployment"):
        mcg_only_install_verification(ocs_registry_image=upgrade_ocs.ocs_registry_image)
    else:
        ocs_install_verification(
            timeout=600,
            skip_osd_distribution_check=True,
            ocs_registry_image=upgrade_ocs.ocs_registry_image,
            post_upgrade_verification=True,
            version_before_upgrade=upgrade_ocs.version_before_upgrade,
        )