def create_stage_operator_source(self):
    """
    This prepares the operator source for OCS deployment from stage.
    """
    logger.info("Adding Stage Secret")
    # generate quay token
    credentials = {
        "user": {
            "username": config.DEPLOYMENT["stage_quay_username"],
            "password": config.DEPLOYMENT["stage_quay_password"],
        }
    }
    token = requests.post(
        url='https://quay.io/cnr/api/v1/users/login',
        data=json.dumps(credentials),
        headers={'Content-Type': 'application/json'},
    ).json()['token']
    stage_ns = config.DEPLOYMENT["stage_namespace"]

    # create Secret
    stage_os_secret = templating.load_yaml(
        constants.OPERATOR_SOURCE_SECRET_YAML
    )
    stage_os_secret['metadata']['name'] = (
        constants.OPERATOR_SOURCE_SECRET_NAME
    )
    stage_os_secret['stringData']['token'] = token
    stage_secret_data_yaml = tempfile.NamedTemporaryFile(
        mode='w+',
        prefix=constants.OPERATOR_SOURCE_SECRET_NAME,
        delete=False,
    )
    templating.dump_data_to_temp_yaml(
        stage_os_secret, stage_secret_data_yaml.name
    )
    run_cmd(f"oc create -f {stage_secret_data_yaml.name}")
    logger.info("Waiting 10 secs after secret is created")
    time.sleep(10)

    logger.info("Adding Stage Operator Source")
    # create Operator Source
    stage_os = templating.load_yaml(
        constants.OPERATOR_SOURCE_YAML
    )
    stage_os['spec']['registryNamespace'] = stage_ns
    stage_os['spec']['authorizationToken']['secretName'] = (
        constants.OPERATOR_SOURCE_SECRET_NAME
    )
    stage_os_data_yaml = tempfile.NamedTemporaryFile(
        mode='w+', prefix=constants.OPERATOR_SOURCE_NAME, delete=False
    )
    templating.dump_data_to_temp_yaml(
        stage_os, stage_os_data_yaml.name
    )
    run_cmd(f"oc create -f {stage_os_data_yaml.name}")
    catalog_source = CatalogSource(
        resource_name=constants.OPERATOR_SOURCE_NAME,
        namespace=constants.MARKETPLACE_NAMESPACE,
    )
    # Wait for the catalog source to be ready
    catalog_source.wait_for_state("READY")
def get_selector_for_ocs_operator():
    """
    Helper function which returns the selector for the package manifest.
    It is needed because of a conflict with live content: multiple package
    manifests carry the ocs-operator name. When we use internal builds, we
    label the catalog source or operator source and use the same selector
    for the package manifest.

    Returns:
        str: Selector for the package manifest if we are on internal builds,
            otherwise None

    """
    catalog_source = CatalogSource(
        resource_name=constants.OPERATOR_CATALOG_SOURCE_NAME,
        namespace=constants.MARKETPLACE_NAMESPACE,
        selector=constants.OPERATOR_INTERNAL_SELECTOR,
    )
    try:
        cs_data = catalog_source.get()
        if cs_data["items"]:
            return constants.OPERATOR_INTERNAL_SELECTOR
    except CommandFailed:
        log.info("Internal catalog source not found!")
    operator_source = OCP(
        kind="OperatorSource",
        resource_name=constants.OPERATOR_SOURCE_NAME,
        namespace=constants.MARKETPLACE_NAMESPACE,
    )
    try:
        operator_source.get()
        return constants.OPERATOR_INTERNAL_SELECTOR
    except CommandFailed:
        log.info("Operator source not found!")
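# Minimal usage sketch (added for illustration, not part of the original
# source): it shows how the selector above is typically fed into
# PackageManifest to resolve the current CSV, mirroring the pattern used by
# the test_upgrade functions later in this section. The helper name below is
# hypothetical; PackageManifest, OCS_OPERATOR_NAME and config are assumed to
# be imported as in the surrounding functions.
def _example_resolve_current_csv():
    operator_selector = get_selector_for_ocs_operator()
    package_manifest = PackageManifest(
        resource_name=OCS_OPERATOR_NAME,
        selector=operator_selector,
    )
    # channel may be None here, as in deploy_ocs_via_operator below
    channel = config.DEPLOYMENT.get('ocs_csv_channel')
    return package_manifest.get_current_csv(channel=channel)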
def deploy_ocs_via_operator(self):
    """
    Method for deploying OCS via the OCS operator
    """
    logger.info("Deployment of OCS via OCS operator")
    olm_manifest, subscription_manifest = (
        self.get_olm_and_subscription_manifest()
    )
    self.label_and_taint_nodes()
    run_cmd(f"oc create -f {olm_manifest}")
    catalog_source = CatalogSource(
        resource_name='ocs-catalogsource',
        namespace='openshift-marketplace',
    )
    # Wait for the catalog source to be ready
    catalog_source.wait_for_state("READY")
    run_cmd(f"oc create -f {subscription_manifest}")
    package_manifest = PackageManifest(
        resource_name=defaults.OCS_OPERATOR_NAME
    )
    # Wait for the package manifest to be ready
    package_manifest.wait_for_resource()
    channel = config.DEPLOYMENT.get('ocs_csv_channel')
    csv_name = package_manifest.get_current_csv(channel=channel)
    csv = CSV(resource_name=csv_name, kind="csv", namespace=self.namespace)
    csv.wait_for_phase("Succeeded", timeout=400)
    ocs_operator_storage_cluster_cr = config.DEPLOYMENT.get(
        'ocs_operator_storage_cluster_cr'
    )
    cluster_data = templating.load_yaml(ocs_operator_storage_cluster_cr)
    cluster_data['metadata']['name'] = config.ENV_DATA[
        'storage_cluster_name'
    ]
    deviceset_data = templating.load_yaml(constants.DEVICESET_YAML)
    device_size = int(
        config.ENV_DATA.get('device_size', defaults.DEVICE_SIZE)
    )
    deviceset_data['dataPVCTemplate']['spec']['resources']['requests'][
        'storage'
    ] = f"{device_size}Gi"

    # Allow lower instance requests and limits for OCS deployment
    if config.DEPLOYMENT.get('allow_lower_instance_requirements'):
        none_resources = {'Requests': None, 'Limits': None}
        deviceset_data["resources"] = deepcopy(none_resources)
        cluster_data['spec']['resources'] = {
            resource: deepcopy(none_resources)
            for resource in ['mon', 'mds', 'rgw', 'mgr', 'noobaa']
        }

    if self.platform.lower() == constants.VSPHERE_PLATFORM:
        cluster_data['spec']['monPVCTemplate']['spec'][
            'storageClassName'
        ] = constants.DEFAULT_SC_VSPHERE
        deviceset_data['dataPVCTemplate']['spec'][
            'storageClassName'
        ] = constants.DEFAULT_SC_VSPHERE

    cluster_data['spec']['storageDeviceSets'] = [deviceset_data]
    cluster_data_yaml = tempfile.NamedTemporaryFile(
        mode='w+', prefix='cluster_storage', delete=False
    )
    templating.dump_data_to_temp_yaml(cluster_data, cluster_data_yaml.name)
    run_cmd(f"oc create -f {cluster_data_yaml.name}")
def create_catalog_source(image=None, ignore_upgrade=False):
    """
    This prepares the catalog source manifest for deploying the OCS operator
    from the quay registry.

    Args:
        image (str): Image of ocs registry.
        ignore_upgrade (bool): Ignore upgrade parameter.

    """
    logger.info("Adding CatalogSource")
    if not image:
        image = config.DEPLOYMENT.get('ocs_registry_image', '')
    if not ignore_upgrade:
        upgrade = config.UPGRADE.get('upgrade', False)
    else:
        upgrade = False
    image_and_tag = image.split(':')
    image = image_and_tag[0]
    image_tag = image_and_tag[1] if len(image_and_tag) == 2 else None
    if not image_tag and config.REPORTING.get("us_ds") == 'DS':
        image_tag = get_latest_ds_olm_tag(
            upgrade, latest_tag=config.DEPLOYMENT.get(
                'default_latest_tag', 'latest'
            )
        )
    catalog_source_data = templating.load_yaml(
        constants.CATALOG_SOURCE_YAML
    )
    cs_name = constants.OPERATOR_CATALOG_SOURCE_NAME
    change_cs_condition = (
        (image or image_tag)
        and catalog_source_data['kind'] == 'CatalogSource'
        and catalog_source_data['metadata']['name'] == cs_name
    )
    if change_cs_condition:
        default_image = config.DEPLOYMENT['default_ocs_registry_image']
        image = image if image else default_image.split(':')[0]
        catalog_source_data['spec']['image'] = (
            f"{image}:{image_tag if image_tag else 'latest'}"
        )
    catalog_source_manifest = tempfile.NamedTemporaryFile(
        mode='w+', prefix='catalog_source_manifest', delete=False
    )
    templating.dump_data_to_temp_yaml(
        catalog_source_data, catalog_source_manifest.name
    )
    run_cmd(f"oc create -f {catalog_source_manifest.name}", timeout=2400)
    catalog_source = CatalogSource(
        resource_name=constants.OPERATOR_CATALOG_SOURCE_NAME,
        namespace=constants.MARKETPLACE_NAMESPACE,
    )
    # Wait for the catalog source to be ready
    catalog_source.wait_for_state("READY")
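# Illustrative sketch only (values are hypothetical, not taken from the real
# template): the minimal shape of the data that create_catalog_source above
# expects from templating.load_yaml(constants.CATALOG_SOURCE_YAML). The kind
# and metadata.name must match for change_cs_condition to hold, and spec.image
# is the field the function overwrites; the actual template likely carries
# additional fields.
_example_catalog_source_data = {
    "kind": "CatalogSource",
    # literal name used with CatalogSource in deploy_ocs_via_operator above
    "metadata": {"name": "ocs-catalogsource"},
    "spec": {"image": "quay.io/example/ocs-registry:latest"},
}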
def create_catalog_source(self):
    """
    This prepares the catalog source manifest for deploying the OCS operator
    from the quay registry.
    """
    logger.info("Adding CatalogSource")
    image = config.DEPLOYMENT.get('ocs_registry_image', '')
    upgrade = config.DEPLOYMENT.get('upgrade', False)
    image_and_tag = image.split(':')
    image = image_and_tag[0]
    image_tag = image_and_tag[1] if len(image_and_tag) == 2 else None
    if not image_tag and config.REPORTING.get("us_ds") == 'DS':
        image_tag = get_latest_ds_olm_tag(
            upgrade, latest_tag=config.DEPLOYMENT.get(
                'default_latest_tag', 'latest'
            )
        )
    catalog_source_data = templating.load_yaml(
        constants.CATALOG_SOURCE_YAML
    )
    cs_name = constants.OPERATOR_CATALOG_SOURCE_NAME
    # TODO: Once needed we can also set the channel for the subscription
    # from config.DEPLOYMENT.get('ocs_csv_channel')
    change_cs_condition = (
        (image or image_tag)
        and catalog_source_data['kind'] == 'CatalogSource'
        and catalog_source_data['metadata']['name'] == cs_name
    )
    if change_cs_condition:
        default_image = config.DEPLOYMENT['default_ocs_registry_image']
        image = image if image else default_image.split(':')[0]
        catalog_source_data['spec']['image'] = (
            f"{image}:{image_tag if image_tag else 'latest'}"
        )
    catalog_source_manifest = tempfile.NamedTemporaryFile(
        mode='w+', prefix='catalog_source_manifest', delete=False
    )
    templating.dump_data_to_temp_yaml(
        catalog_source_data, catalog_source_manifest.name
    )
    run_cmd(f"oc create -f {catalog_source_manifest.name}", timeout=2400)
    catalog_source = CatalogSource(
        resource_name=constants.OPERATOR_CATALOG_SOURCE_NAME,
        namespace='openshift-marketplace',
    )
    # Wait for the catalog source to be ready
    catalog_source.wait_for_state("READY")
def set_upgrade_images(self):
    """
    Set images for upgrade
    """
    ocs_catalog = CatalogSource(
        resource_name=constants.OPERATOR_CATALOG_SOURCE_NAME,
        namespace=constants.MARKETPLACE_NAMESPACE,
    )
    if not self.upgrade_in_current_source:
        if not ocs_catalog.is_exist() and not self.upgrade_in_current_source:
            log.info("OCS catalog source doesn't exist. Creating new one.")
            create_catalog_source(self.ocs_registry_image, ignore_upgrade=True)
        image_url = ocs_catalog.get_image_url()
        image_tag = ocs_catalog.get_image_name()
        log.info(f"Current image is: {image_url}, tag: {image_tag}")
        version_change = (
            self.get_parsed_versions()[1] > self.get_parsed_versions()[0]
        )
        if self.ocs_registry_image:
            image_url, new_image_tag = self.ocs_registry_image.split(":")
        elif config.UPGRADE.get("upgrade_to_latest", True) or version_change:
            new_image_tag = get_latest_ds_olm_tag()
        else:
            new_image_tag = get_next_version_available_for_upgrade(image_tag)
        cs_data = deepcopy(ocs_catalog.data)
        image_for_upgrade = ":".join([image_url, new_image_tag])
        log.info(f"Image: {image_for_upgrade} will be used for upgrade.")
        cs_data["spec"]["image"] = image_for_upgrade
        with NamedTemporaryFile() as cs_yaml:
            dump_data_to_temp_yaml(cs_data, cs_yaml.name)
            ocs_catalog.apply(cs_yaml.name)
def get_ocs_build_number():
    """
    Gets the build number for the ocs operator

    Returns:
        str: build number for the ocs operator version

    """
    from ocs_ci.ocs.resources.catalog_source import CatalogSource

    build_num = ""
    ocs_catalog = CatalogSource(
        resource_name=OPERATOR_CATALOG_SOURCE_NAME,
        namespace="openshift-marketplace",
    )
    if config.REPORTING['us_ds'] == 'DS':
        build_info = ocs_catalog.get_image_name()
        try:
            return build_info.split("-")[1].split(".")[0]
        except (IndexError, AttributeError):
            logging.warning("No version info found for OCS operator")
    return build_num
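# Illustrative sketch only (hypothetical tag value, not from a real build):
# this reproduces the parsing that get_ocs_build_number performs on the image
# tag returned by get_image_name(). Assuming a DS-style tag such as
# "4.6.0-156.ci", the build number is the segment between "-" and the next ".".
_example_build_info = "4.6.0-156.ci"
_example_build_num = _example_build_info.split("-")[1].split(".")[0]  # "156"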
def test_upgrade():
    ceph_cluster = CephCluster()
    ceph_cluster.enable_health_monitor()
    namespace = config.ENV_DATA['cluster_namespace']
    ocs_catalog = CatalogSource(
        resource_name=constants.OPERATOR_CATALOG_SOURCE_NAME,
        namespace="openshift-marketplace",
    )
    image_url = ocs_catalog.get_image_url()
    image_tag = ocs_catalog.get_image_name()
    if config.DEPLOYMENT.get('upgrade_to_latest', True):
        new_image_tag = get_latest_ds_olm_tag()
    else:
        new_image_tag = get_next_version_available_for_upgrade(image_tag)
    cs_data = deepcopy(ocs_catalog.data)
    cs_data['spec']['image'] = ':'.join([image_url, new_image_tag])
    package_manifest = PackageManifest(resource_name=OCS_OPERATOR_NAME)
    csv_name_pre_upgrade = package_manifest.get_current_csv()
    log.info(f"CSV name before upgrade is: {csv_name_pre_upgrade}")
    csv_pre_upgrade = CSV(
        resource_name=csv_name_pre_upgrade,
        namespace=namespace,
    )
    pre_upgrade_images = get_images(csv_pre_upgrade.get())
    with NamedTemporaryFile() as cs_yaml:
        dump_data_to_temp_yaml(cs_data, cs_yaml.name)
        ocs_catalog.apply(cs_yaml.name)
    # Wait for the package manifest to be ready
    package_manifest.wait_for_resource()
    subscription_plan_approval = config.DEPLOYMENT.get(
        'subscription_plan_approval'
    )
    if subscription_plan_approval == 'Manual':
        wait_for_install_plan_and_approve(namespace)
    attempts = 145
    for attempt in range(1, attempts):
        if attempts == attempt:
            raise TimeoutException("No new CSV found after upgrade!")
        log.info(f"Attempt {attempt}/{attempts} to check CSV upgraded.")
        package_manifest.reload_data()
        csv_name_post_upgrade = package_manifest.get_current_csv()
        if csv_name_post_upgrade == csv_name_pre_upgrade:
            log.info(f"CSV is still: {csv_name_post_upgrade}")
            sleep(5)
        else:
            log.info(f"CSV now upgraded to: {csv_name_post_upgrade}")
            break
    csv_post_upgrade = CSV(
        resource_name=csv_name_post_upgrade,
        namespace=namespace,
    )
    log.info(
        f"Waiting for CSV {csv_name_post_upgrade} to be in succeeded state"
    )
    csv_post_upgrade.wait_for_phase("Succeeded", timeout=600)
    post_upgrade_images = get_images(csv_post_upgrade.get())
    old_images, _, _ = get_upgrade_image_info(
        pre_upgrade_images, post_upgrade_images
    )
    verify_image_versions(old_images)
    ocs_install_verification(timeout=600, skip_osd_distribution_check=True)
    ceph_cluster.disable_health_monitor()
    if ceph_cluster.health_error_status:
        # Fail the test if Ceph reported HEALTH_ERROR during the upgrade
        raise CephHealthException(
            f"During upgrade hit Ceph HEALTH_ERROR: "
            f"{ceph_cluster.health_error_status}"
        )
def test_upgrade():
    namespace = config.ENV_DATA['cluster_namespace']
    ocs_catalog = CatalogSource(
        resource_name=OPERATOR_CATALOG_SOURCE_NAME,
        namespace="openshift-marketplace",
    )
    image_url = ocs_catalog.get_image_url()
    image_tag = ocs_catalog.get_image_name()
    if config.DEPLOYMENT.get('upgrade_to_latest', True):
        new_image_tag = get_latest_ds_olm_tag()
    else:
        new_image_tag = get_next_version_available_for_upgrade(image_tag)
    cs_data = deepcopy(ocs_catalog.data)
    cs_data['spec']['image'] = ':'.join([image_url, new_image_tag])
    package_manifest = PackageManifest(resource_name=OCS_OPERATOR_NAME)
    csv_name_pre_upgrade = package_manifest.get_current_csv()
    log.info(f"CSV name before upgrade is: {csv_name_pre_upgrade}")
    with NamedTemporaryFile() as cs_yaml:
        dump_data_to_temp_yaml(cs_data, cs_yaml.name)
        ocs_catalog.apply(cs_yaml.name)
    # Wait for the package manifest to be ready
    package_manifest.wait_for_resource()
    attempts = 145
    for attempt in range(1, attempts):
        if attempts == attempt:
            raise TimeoutException("No new CSV found after upgrade!")
        log.info(f"Attempt {attempt}/{attempts} to check CSV upgraded.")
        package_manifest.reload_data()
        csv_name_post_upgrade = package_manifest.get_current_csv()
        if csv_name_post_upgrade == csv_name_pre_upgrade:
            log.info(f"CSV is still: {csv_name_post_upgrade}")
            sleep(5)
        else:
            log.info(f"CSV now upgraded to: {csv_name_post_upgrade}")
            break
    csv = CSV(
        resource_name=csv_name_post_upgrade,
        namespace=namespace,
    )
    log.info(
        f"Waiting for CSV {csv_name_post_upgrade} to be in succeeded state"
    )
    csv.wait_for_phase("Succeeded", timeout=400)
    ocs_install_verification(timeout=600)
def prepare_disconnected_ocs_deployment(upgrade=False):
    """
    Prepare disconnected ocs deployment:
    - get related images from OCS operator bundle csv
    - mirror related images to mirror registry
    - create imageContentSourcePolicy for the mirrored images
    - disable the default OperatorSources

    Args:
        upgrade (bool): is this a fresh installation or an upgrade process
            (default: False)

    Returns:
        str: mirrored OCS registry image prepared for the disconnected
            installation or None (for live deployment)

    """
    logger.info(
        f"Prepare for disconnected OCS {'upgrade' if upgrade else 'installation'}"
    )
    if config.DEPLOYMENT.get("live_deployment"):
        get_opm_tool()

        pull_secret_path = os.path.join(constants.TOP_DIR, "data", "pull-secret")
        ocp_version = get_ocp_version()
        index_image = f"{config.DEPLOYMENT['cs_redhat_operators_image']}:v{ocp_version}"
        mirrored_index_image = (
            f"{config.DEPLOYMENT['mirror_registry']}/{constants.MIRRORED_INDEX_IMAGE_NAMESPACE}/"
            f"{constants.MIRRORED_INDEX_IMAGE_NAME}:v{ocp_version}"
        )
        # prune an index image
        logger.info(
            f"Prune index image {index_image} -> {mirrored_index_image} "
            f"(packages: {', '.join(constants.DISCON_CL_REQUIRED_PACKAGES)})"
        )
        cmd = (
            f"opm index prune -f {index_image} "
            f"-p {','.join(constants.DISCON_CL_REQUIRED_PACKAGES)} "
            f"-t {mirrored_index_image}"
        )
        # opm tool doesn't have an --authfile parameter, so we have to supply
        # the auth file through an environment variable
        os.environ["REGISTRY_AUTH_FILE"] = pull_secret_path
        exec_cmd(cmd)

        # login to mirror registry
        login_to_mirror_registry(pull_secret_path)

        # push pruned index image to mirror registry
        logger.info(
            f"Push pruned index image to mirror registry: {mirrored_index_image}"
        )
        cmd = f"podman push --authfile {pull_secret_path} --tls-verify=false {mirrored_index_image}"
        exec_cmd(cmd)

        # mirror related images (this might take a very long time)
        logger.info(
            f"Mirror images related to index image: {mirrored_index_image}"
        )
        cmd = (
            f"oc adm catalog mirror {mirrored_index_image} -a {pull_secret_path} --insecure "
            f"{config.DEPLOYMENT['mirror_registry']} --index-filter-by-os='.*'"
        )
        oc_acm_result = exec_cmd(cmd, timeout=7200)

        for line in oc_acm_result.stdout.decode("utf-8").splitlines():
            if "wrote mirroring manifests to" in line:
                break
        else:
            raise NotFoundError(
                "Manifests directory not printed to stdout of "
                "'oc adm catalog mirror ...' command."
            )
        mirroring_manifests_dir = line.replace("wrote mirroring manifests to ", "")
        logger.debug(f"Mirrored manifests directory: {mirroring_manifests_dir}")

        # create ImageContentSourcePolicy
        icsp_file = os.path.join(
            f"{mirroring_manifests_dir}",
            "imageContentSourcePolicy.yaml",
        )
        exec_cmd(f"oc apply -f {icsp_file}")

        # Disable the default OperatorSources
        exec_cmd(
            """oc patch OperatorHub cluster --type json """
            """-p '[{"op": "add", "path": "/spec/disableAllDefaultSources", "value": true}]'"""
        )

        # create redhat-operators CatalogSource
        catalog_source_data = templating.load_yaml(constants.CATALOG_SOURCE_YAML)
        catalog_source_manifest = tempfile.NamedTemporaryFile(
            mode="w+", prefix="catalog_source_manifest", delete=False
        )
        catalog_source_data["spec"]["image"] = f"{mirrored_index_image}"
        catalog_source_data["metadata"]["name"] = "redhat-operators"
        catalog_source_data["spec"]["displayName"] = "Red Hat Operators - Mirrored"
        # remove the ocs-operator-internal label
        catalog_source_data["metadata"]["labels"].pop("ocs-operator-internal", None)
        templating.dump_data_to_temp_yaml(
            catalog_source_data, catalog_source_manifest.name
        )
        exec_cmd(f"oc apply -f {catalog_source_manifest.name}")
        catalog_source = CatalogSource(
            resource_name="redhat-operators",
            namespace=constants.MARKETPLACE_NAMESPACE,
        )
        # Wait for the catalog source to be ready
        catalog_source.wait_for_state("READY")
        return

    if config.DEPLOYMENT.get("stage_rh_osbs"):
        raise NotImplementedError(
            "Disconnected installation from stage is not implemented!"
        )

    if upgrade:
        ocs_registry_image = config.UPGRADE.get("upgrade_ocs_registry_image", "")
    else:
        ocs_registry_image = config.DEPLOYMENT.get("ocs_registry_image", "")
    logger.debug(f"ocs-registry-image: {ocs_registry_image}")
    ocs_registry_image_and_tag = ocs_registry_image.rsplit(":", 1)
    image_tag = (
        ocs_registry_image_and_tag[1]
        if len(ocs_registry_image_and_tag) == 2
        else None
    )
    if not image_tag and config.REPORTING.get("us_ds") == "DS":
        image_tag = get_latest_ds_olm_tag(
            upgrade=False if upgrade else config.UPGRADE.get("upgrade", False),
            latest_tag=config.DEPLOYMENT.get("default_latest_tag", "latest"),
        )
        ocs_registry_image = (
            f"{config.DEPLOYMENT['default_ocs_registry_image'].split(':')[0]}:{image_tag}"
        )
    bundle_image = f"{constants.OCS_OPERATOR_BUNDLE_IMAGE}:{image_tag}"
    logger.debug(f"ocs-operator-bundle image: {bundle_image}")

    csv_yaml = get_csv_from_image(bundle_image)
    ocs_operator_image = (
        csv_yaml.get("spec", {})
        .get("install", {})
        .get("spec", {})
        .get("deployments", [{}])[0]
        .get("spec", {})
        .get("template", {})
        .get("spec", {})
        .get("containers", [{}])[0]
        .get("image")
    )
    logger.debug(f"ocs-operator-image: {ocs_operator_image}")

    # prepare a list of related images (bundle, registry and operator images
    # and all images from the relatedImages section of the csv)
    ocs_related_images = []
    ocs_related_images.append(get_image_with_digest(bundle_image))
    ocs_registry_image_with_digest = get_image_with_digest(ocs_registry_image)
    ocs_related_images.append(ocs_registry_image_with_digest)
    ocs_related_images.append(get_image_with_digest(ocs_operator_image))
    ocs_related_images += [
        image["image"] for image in csv_yaml.get("spec").get("relatedImages")
    ]
    logger.debug(f"OCS Related Images: {ocs_related_images}")

    mirror_registry = config.DEPLOYMENT["mirror_registry"]
    # prepare an images mapping file for mirroring
    mapping_file_content = [
        f"{image}={mirror_registry}{image[image.index('/'):image.index('@')]}\n"
        for image in ocs_related_images
    ]
    logger.debug(f"Mapping file content: {mapping_file_content}")
    name = "ocs-images"
    mapping_file = os.path.join(
        config.ENV_DATA["cluster_path"], f"{name}-mapping.txt"
    )
    # write mapping file to disk
    with open(mapping_file, "w") as f:
        f.writelines(mapping_file_content)

    # prepare ImageContentSourcePolicy for OCS images
    with open(constants.TEMPLATE_IMAGE_CONTENT_SOURCE_POLICY_YAML) as f:
        ocs_icsp = yaml.safe_load(f)
    ocs_icsp["metadata"]["name"] = name
    ocs_icsp["spec"]["repositoryDigestMirrors"] = []
    for image in ocs_related_images:
        ocs_icsp["spec"]["repositoryDigestMirrors"].append(
            {
                "mirrors": [
                    f"{mirror_registry}{image[image.index('/'):image.index('@')]}"
                ],
                "source": image[:image.index("@")],
            }
        )
    logger.debug(f"OCS imageContentSourcePolicy: {yaml.safe_dump(ocs_icsp)}")
    ocs_icsp_file = os.path.join(
        config.ENV_DATA["cluster_path"], f"{name}-imageContentSourcePolicy.yaml"
    )
    with open(ocs_icsp_file, "w+") as fs:
        yaml.safe_dump(ocs_icsp, fs)

    # create ImageContentSourcePolicy
    exec_cmd(f"oc apply -f {ocs_icsp_file}")

    # mirror images based on the mapping file
    with prepare_customized_pull_secret(ocs_related_images) as authfile_fo:
        login_to_mirror_registry(authfile_fo.name)
        exec_cmd(
            f"oc image mirror --filter-by-os='.*' -f {mapping_file} --insecure "
            f"--registry-config={authfile_fo.name} --max-per-registry=2",
            timeout=3600,
        )
        # mirror also the OCS registry image with the original version tag
        # (it will be used for creating the CatalogSource)
        mirrored_ocs_registry_image = (
            f"{mirror_registry}{ocs_registry_image[ocs_registry_image.index('/'):]}"
        )
        exec_cmd(
            f"podman push --tls-verify=false --authfile {authfile_fo.name} "
            f"{ocs_registry_image} {mirrored_ocs_registry_image}"
        )

    # Disable the default OperatorSources
    exec_cmd(
        """oc patch OperatorHub cluster --type json """
        """-p '[{"op": "add", "path": "/spec/disableAllDefaultSources", "value": true}]'"""
    )

    # wait for the newly created imageContentSourcePolicy to be applied on all nodes
    wait_for_machineconfigpool_status("all")
    return mirrored_ocs_registry_image
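# Illustrative sketch only (hypothetical image and registry values, not taken
# from a real cluster): it reproduces the mapping-file line format built in
# prepare_disconnected_ocs_deployment above, where everything between the
# first "/" and the "@" of the digest-pinned source image is appended to the
# mirror registry host.
_example_image = "quay.io/rhceph-dev/ocs-registry@sha256:0000000000000000"
_example_mirror_registry = "mirror.example.com:5000"
_example_mapping_line = (
    f"{_example_image}="
    f"{_example_mirror_registry}"
    f"{_example_image[_example_image.index('/'):_example_image.index('@')]}\n"
)
# -> "quay.io/.../ocs-registry@sha256:...=mirror.example.com:5000/rhceph-dev/ocs-registry\n"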
def create_catalog_source(image=None, ignore_upgrade=False):
    """
    This prepares the catalog source manifest for deploying the OCS operator
    from the quay registry.

    Args:
        image (str): Image of ocs registry.
        ignore_upgrade (bool): Ignore upgrade parameter.

    """
    if config.ENV_DATA["platform"] == constants.IBMCLOUD_PLATFORM:
        link_all_sa_and_secret(constants.OCS_SECRET, constants.MARKETPLACE_NAMESPACE)
    logger.info("Adding CatalogSource")
    if not image:
        image = config.DEPLOYMENT.get("ocs_registry_image", "")
    if config.DEPLOYMENT.get("stage_rh_osbs"):
        image = config.DEPLOYMENT.get("stage_index_image", constants.OSBS_BOUNDLE_IMAGE)
        osbs_image_tag = config.DEPLOYMENT.get(
            "stage_index_image_tag", f"v{get_ocp_version()}"
        )
        image += f":{osbs_image_tag}"
        run_cmd(
            "oc patch image.config.openshift.io/cluster --type merge -p '"
            '{"spec": {"registrySources": {"insecureRegistries": '
            '["registry-proxy.engineering.redhat.com"]}}}\''
        )
        run_cmd(f"oc apply -f {constants.STAGE_IMAGE_CONTENT_SOURCE_POLICY_YAML}")
        logger.info("Sleeping for 60 sec to start update machineconfigpool status")
        time.sleep(60)
        wait_for_machineconfigpool_status("all", timeout=1800)
    if not ignore_upgrade:
        upgrade = config.UPGRADE.get("upgrade", False)
    else:
        upgrade = False
    image_and_tag = image.split(":")
    image = image_and_tag[0]
    image_tag = image_and_tag[1] if len(image_and_tag) == 2 else None
    if not image_tag and config.REPORTING.get("us_ds") == "DS":
        image_tag = get_latest_ds_olm_tag(
            upgrade,
            latest_tag=config.DEPLOYMENT.get("default_latest_tag", "latest"),
        )
    platform = config.ENV_DATA.get("platform").lower()
    if platform == constants.IBM_POWER_PLATFORM:
        # TEMP Hack... latest-stable-4.6 does not have ppc64le bits.
        image_tag = "latest-4.6"
    catalog_source_data = templating.load_yaml(constants.CATALOG_SOURCE_YAML)
    cs_name = constants.OPERATOR_CATALOG_SOURCE_NAME
    change_cs_condition = (
        (image or image_tag)
        and catalog_source_data["kind"] == "CatalogSource"
        and catalog_source_data["metadata"]["name"] == cs_name
    )
    if change_cs_condition:
        default_image = config.DEPLOYMENT["default_ocs_registry_image"]
        image = image if image else default_image.split(":")[0]
        catalog_source_data["spec"][
            "image"
        ] = f"{image}:{image_tag if image_tag else 'latest'}"
    catalog_source_manifest = tempfile.NamedTemporaryFile(
        mode="w+", prefix="catalog_source_manifest", delete=False
    )
    templating.dump_data_to_temp_yaml(
        catalog_source_data, catalog_source_manifest.name
    )
    run_cmd(f"oc apply -f {catalog_source_manifest.name}", timeout=2400)
    catalog_source = CatalogSource(
        resource_name=constants.OPERATOR_CATALOG_SOURCE_NAME,
        namespace=constants.MARKETPLACE_NAMESPACE,
    )
    # Wait for the catalog source to be ready
    catalog_source.wait_for_state("READY")
def update_non_ga_version():
    """
    Update pull secret, catalog source, subscription and operators to consume
    ODF and deployer versions provided in the configuration.
    """
    deployer_version = config.UPGRADE["deployer_version"]
    upgrade_ocs_version = config.UPGRADE["upgrade_ocs_version"]
    logger.info(f"Starting update to next version of deployer: {deployer_version}")
    logger.info("Update catalogsource")
    disable_specific_source(constants.OPERATOR_CATALOG_SOURCE_NAME)
    catalog_source_data = templating.load_yaml(constants.CATALOG_SOURCE_YAML)
    catalog_source_data["spec"]["image"] = config.DEPLOYMENT["ocs_registry_image"]
    catalog_source_manifest = tempfile.NamedTemporaryFile(
        mode="w+", prefix="catalog_source_manifest", delete=False
    )
    templating.dump_data_to_temp_yaml(
        catalog_source_data, catalog_source_manifest.name
    )
    run_cmd(f"oc apply -f {catalog_source_manifest.name}", timeout=2400)
    catalog_source = CatalogSource(
        resource_name=constants.OPERATOR_CATALOG_SOURCE_NAME,
        namespace=constants.MARKETPLACE_NAMESPACE,
    )
    logger.info("Edit annotation on the deployer CSV")
    run_cmd(
        f"oc annotate csv --overwrite ocs-osd-deployer.v{deployer_version} "
        'operatorframework.io/properties=\'{"properties":[{"type":"olm.package",'
        '"value":{"packageName":"ocs-osd-deployer","version":'
        f'"{deployer_version}"'
        '}},{"type":"olm.gvk","value":{"group":"ocs.openshift.io","kind":'
        '"ManagedOCS","version":"v1alpha1"}},{"type":"olm.package.required",'
        '"value":{"packageName":"ose-prometheus-operator","versionRange":"4.10.0"}},'
        '{"type":"olm.package.required","value":{"packageName":"odf-operator",'
        f'"versionRange":"{upgrade_ocs_version}"'
        "}}]}' -n openshift-storage"
    )
    # Wait for the catalog source to be ready
    catalog_source.wait_for_state("READY")
    ocs_channel = config.UPGRADE["ocs_channel"]
    odf_operator_u = f"odf-operator.v{upgrade_ocs_version}"
    mplace = constants.MARKETPLACE_NAMESPACE
    logger.info("Edit subscriptions")
    oc = ocp.OCP(
        kind=constants.SUBSCRIPTION,
        namespace=config.ENV_DATA["cluster_namespace"],
    )
    subscriptions = oc.get()["items"]
    if config.ENV_DATA.get("cluster_type").lower() == "provider":
        subscriptions_to_edit = {"odf-operator"}
        patch_changes = [
            f'[{{"op": "replace", "path": "/spec/channel", "value" : "{ocs_channel}"}}]',
            f'[{{"op": "replace", "path": "/spec/startingCSV", "value" : "{odf_operator_u}"}}]',
        ]
    elif config.ENV_DATA.get("cluster_type").lower() == "consumer":
        subscriptions_to_edit = {
            "ocs-operator",
            "odf-operator",
            "mcg-operator",
            "odf-csi-addons-operator",
        }
        patch_changes = [
            f'[{{"op": "replace", "path": "/spec/channel", "value" : "{ocs_channel}"}}]',
            f'[{{"op": "replace", "path": "/spec/sourceNamespace", "value" : "{mplace}"}}]',
            f'[{{"op": "replace", "path": "/spec/startingCSV", "value" : "{odf_operator_u}"}}]',
        ]
    for subscription in subscriptions:
        for to_edit in subscriptions_to_edit:
            sub = (
                subscription.get("metadata").get("name")
                if subscription.get("metadata").get("name").startswith(to_edit)
                else ""
            )
            if sub:
                for change in patch_changes:
                    oc.patch(
                        resource_name=sub,
                        params=change,
                        format_type="json",
                    )
def prepare_disconnected_ocs_deployment(upgrade=False):
    """
    Prepare disconnected ocs deployment:
    - mirror required images from redhat-operators
    - get related images from OCS operator bundle csv
    - mirror related images to mirror registry
    - create imageContentSourcePolicy for the mirrored images
    - disable the default OperatorSources

    Args:
        upgrade (bool): is this a fresh installation or an upgrade process
            (default: False)

    Returns:
        str: mirrored OCS registry image prepared for the disconnected
            installation or None (for live deployment)

    """
    if config.DEPLOYMENT.get("stage_rh_osbs"):
        raise NotImplementedError(
            "Disconnected installation from stage is not implemented!"
        )
    logger.info(
        f"Prepare for disconnected OCS {'upgrade' if upgrade else 'installation'}"
    )
    # Disable the default OperatorSources
    disable_default_sources()
    pull_secret_path = os.path.join(constants.TOP_DIR, "data", "pull-secret")

    # login to mirror registry
    login_to_mirror_registry(pull_secret_path)

    # prepare the main index image (redhat-operators-index for live deployment
    # or ocs-registry image for an unreleased version)
    if config.DEPLOYMENT.get("live_deployment"):
        index_image = (
            f"{config.DEPLOYMENT['cs_redhat_operators_image']}:v{get_ocp_version()}"
        )
        mirrored_index_image = (
            f"{config.DEPLOYMENT['mirror_registry']}/{constants.MIRRORED_INDEX_IMAGE_NAMESPACE}/"
            f"{constants.MIRRORED_INDEX_IMAGE_NAME}:v{get_ocp_version()}"
        )
    else:
        if upgrade:
            index_image = config.UPGRADE.get("upgrade_ocs_registry_image", "")
        else:
            index_image = config.DEPLOYMENT.get("ocs_registry_image", "")
        ocs_registry_image_and_tag = index_image.rsplit(":", 1)
        image_tag = (
            ocs_registry_image_and_tag[1]
            if len(ocs_registry_image_and_tag) == 2
            else None
        )
        if not image_tag:
            image_tag = get_latest_ds_olm_tag(
                upgrade=False if upgrade else config.UPGRADE.get("upgrade", False),
                latest_tag=config.DEPLOYMENT.get("default_latest_tag", "latest"),
            )
            index_image = f"{config.DEPLOYMENT['default_ocs_registry_image'].split(':')[0]}:{image_tag}"
        mirrored_index_image = f"{config.DEPLOYMENT['mirror_registry']}{index_image[index_image.index('/'):]}"
    logger.debug(f"index_image: {index_image}")
    logger.debug(f"mirrored_index_image: {mirrored_index_image}")
    prune_and_mirror_index_image(
        index_image,
        mirrored_index_image,
        constants.DISCON_CL_REQUIRED_PACKAGES,
    )

    # in case of live deployment, we have to create the mirrored
    # redhat-operators catalogsource
    if config.DEPLOYMENT.get("live_deployment"):
        # create redhat-operators CatalogSource
        catalog_source_data = templating.load_yaml(constants.CATALOG_SOURCE_YAML)
        catalog_source_manifest = tempfile.NamedTemporaryFile(
            mode="w+", prefix="catalog_source_manifest", delete=False
        )
        catalog_source_data["spec"]["image"] = f"{mirrored_index_image}"
        catalog_source_data["metadata"]["name"] = constants.OPERATOR_CATALOG_SOURCE_NAME
        catalog_source_data["spec"]["displayName"] = "Red Hat Operators - Mirrored"
        # remove the ocs-operator-internal label
        catalog_source_data["metadata"]["labels"].pop("ocs-operator-internal", None)
        templating.dump_data_to_temp_yaml(
            catalog_source_data, catalog_source_manifest.name
        )
        exec_cmd(
            f"oc {'replace' if upgrade else 'apply'} -f {catalog_source_manifest.name}"
        )
        catalog_source = CatalogSource(
            resource_name=constants.OPERATOR_CATALOG_SOURCE_NAME,
            namespace=constants.MARKETPLACE_NAMESPACE,
        )
        # Wait for the catalog source to be ready
        catalog_source.wait_for_state("READY")

    return mirrored_index_image
def test_upgrade():
    ceph_cluster = CephCluster()
    with CephHealthMonitor(ceph_cluster):
        namespace = config.ENV_DATA['cluster_namespace']
        ocs_catalog = CatalogSource(
            resource_name=constants.OPERATOR_CATALOG_SOURCE_NAME,
            namespace=constants.MARKETPLACE_NAMESPACE,
        )
        version_before_upgrade = config.ENV_DATA.get("ocs_version")
        upgrade_version = config.UPGRADE.get(
            "upgrade_ocs_version", version_before_upgrade
        )
        parsed_version_before_upgrade = parse_version(version_before_upgrade)
        parsed_upgrade_version = parse_version(upgrade_version)
        assert parsed_upgrade_version >= parsed_version_before_upgrade, (
            f"Version you would like to upgrade to: {upgrade_version} "
            f"is not higher or equal to the version you are currently running: "
            f"{version_before_upgrade}"
        )
        version_change = parsed_upgrade_version > parsed_version_before_upgrade
        if version_change:
            version_config_file = os.path.join(
                constants.CONF_DIR, 'ocs_version', f'ocs-{upgrade_version}.yaml'
            )
            assert os.path.exists(version_config_file), (
                f"OCS version config file {version_config_file} doesn't exist!"
            )
            with open(
                os.path.abspath(os.path.expanduser(version_config_file))
            ) as file_stream:
                custom_config_data = yaml.safe_load(file_stream)
                config.update(custom_config_data)
        image_url = ocs_catalog.get_image_url()
        image_tag = ocs_catalog.get_image_name()
        log.info(f"Current image is: {image_url}, tag: {image_tag}")
        ocs_registry_image = config.UPGRADE.get('upgrade_ocs_registry_image')
        if ocs_registry_image:
            image_url, new_image_tag = ocs_registry_image.split(':')
        elif config.UPGRADE.get('upgrade_to_latest', True) or version_change:
            new_image_tag = get_latest_ds_olm_tag()
        else:
            new_image_tag = get_next_version_available_for_upgrade(image_tag)
        cs_data = deepcopy(ocs_catalog.data)
        image_for_upgrade = ':'.join([image_url, new_image_tag])
        log.info(f"Image: {image_for_upgrade} will be used for upgrade.")
        cs_data['spec']['image'] = image_for_upgrade
        operator_selector = get_selector_for_ocs_operator()
        package_manifest = PackageManifest(
            resource_name=OCS_OPERATOR_NAME,
            selector=operator_selector,
        )
        csv_name_pre_upgrade = package_manifest.get_current_csv()
        log.info(f"CSV name before upgrade is: {csv_name_pre_upgrade}")
        csv_pre_upgrade = CSV(
            resource_name=csv_name_pre_upgrade,
            namespace=namespace,
        )
        pre_upgrade_images = get_images(csv_pre_upgrade.get())
        with NamedTemporaryFile() as cs_yaml:
            dump_data_to_temp_yaml(cs_data, cs_yaml.name)
            ocs_catalog.apply(cs_yaml.name)
        # Wait for the package manifest to be ready
        package_manifest.wait_for_resource()
        subscription_plan_approval = config.DEPLOYMENT.get(
            'subscription_plan_approval'
        )
        if subscription_plan_approval == 'Manual':
            wait_for_install_plan_and_approve(namespace)
        attempts = 145
        for attempt in range(1, attempts):
            if attempts == attempt:
                raise TimeoutException("No new CSV found after upgrade!")
            log.info(f"Attempt {attempt}/{attempts} to check CSV upgraded.")
            package_manifest.reload_data()
            csv_name_post_upgrade = package_manifest.get_current_csv()
            if csv_name_post_upgrade == csv_name_pre_upgrade:
                log.info(f"CSV is still: {csv_name_post_upgrade}")
                sleep(5)
            else:
                log.info(f"CSV now upgraded to: {csv_name_post_upgrade}")
                break
        csv_post_upgrade = CSV(
            resource_name=csv_name_post_upgrade,
            namespace=namespace,
        )
        log.info(
            f"Waiting for CSV {csv_name_post_upgrade} to be in succeeded state"
        )
        if version_before_upgrade == '4.2' and upgrade_version == '4.3':
            log.info("Force creating Ceph toolbox after upgrade 4.2 -> 4.3")
            setup_ceph_toolbox(force_setup=True)
        csv_post_upgrade.wait_for_phase("Succeeded", timeout=600)
        post_upgrade_images = get_images(csv_post_upgrade.get())
        old_images, _, _ = get_upgrade_image_info(
            pre_upgrade_images, post_upgrade_images
        )
        verify_image_versions(old_images, parsed_upgrade_version)
        ocs_install_verification(timeout=600, skip_osd_distribution_check=True)
def pytest_configure(config):
    """
    Load config files, and initialize ocs-ci library.

    Args:
        config (pytest.config): Pytest config object

    """
    if not (config.getoption("--help") or config.getoption("collectonly")):
        process_cluster_cli_params(config)
        config_file = os.path.expanduser(
            os.path.join(
                ocsci_config.RUN['log_dir'],
                f"run-{ocsci_config.RUN['run_id']}-config.yaml",
            )
        )
        dump_config_to_file(config_file)
        log.info(
            f"Dump of the consolidated config file is located here: "
            f"{config_file}"
        )
        # Add OCS related versions to the html report and remove extraneous metadata
        markers_arg = config.getoption('-m')
        if ocsci_config.RUN['cli_params'].get('teardown') or (
            "deployment" in markers_arg
            and ocsci_config.RUN['cli_params'].get('deploy')
        ):
            log.info(
                "Skipping version collection because a deploy or destroy of "
                "the cluster is being performed."
            )
            return
        print("Collecting Cluster versions")
        # remove extraneous metadata
        del config._metadata['Python']
        del config._metadata['Packages']
        del config._metadata['Plugins']
        del config._metadata['Platform']
        config._metadata['Test Run Name'] = get_testrun_name()
        try:
            # add cluster version
            clusterversion = get_cluster_version()
            config._metadata['Cluster Version'] = clusterversion
            # add ceph version
            ceph_version = get_ceph_version()
            config._metadata['Ceph Version'] = ceph_version
            # add csi versions
            csi_versions = get_csi_versions()
            config._metadata['cephfsplugin'] = csi_versions.get('csi-cephfsplugin')
            config._metadata['rbdplugin'] = csi_versions.get('csi-rbdplugin')
            # add ocs operator version
            ocs_catalog = CatalogSource(
                resource_name=OPERATOR_CATALOG_SOURCE_NAME,
                namespace="openshift-marketplace",
            )
            if ocsci_config.REPORTING['us_ds'] == 'DS':
                config._metadata['OCS operator'] = ocs_catalog.get_image_name()
            mods = get_version_info(
                namespace=ocsci_config.ENV_DATA['cluster_namespace']
            )
            skip_list = ['ocs-operator']
            for key, val in mods.items():
                if key not in skip_list:
                    config._metadata[key] = val.rsplit('/')[-1]
        except (FileNotFoundError, CommandFailed):
            pass
def test_upgrade():
    ceph_cluster = CephCluster()
    with CephHealthMonitor(ceph_cluster):
        namespace = config.ENV_DATA['cluster_namespace']
        version_before_upgrade = config.ENV_DATA.get("ocs_version")
        upgrade_version = config.UPGRADE.get(
            "upgrade_ocs_version", version_before_upgrade
        )
        ocs_registry_image = config.UPGRADE.get('upgrade_ocs_registry_image')
        if ocs_registry_image:
            upgrade_version = get_ocs_version_from_image(ocs_registry_image)
        parsed_version_before_upgrade = parse_version(version_before_upgrade)
        parsed_upgrade_version = parse_version(upgrade_version)
        assert parsed_upgrade_version >= parsed_version_before_upgrade, (
            f"Version you would like to upgrade to: {upgrade_version} "
            f"is not higher or equal to the version you are currently running: "
            f"{version_before_upgrade}"
        )
        operator_selector = get_selector_for_ocs_operator()
        package_manifest = PackageManifest(
            resource_name=OCS_OPERATOR_NAME,
            selector=operator_selector,
        )
        channel = config.DEPLOYMENT.get('ocs_csv_channel')
        csv_name_pre_upgrade = package_manifest.get_current_csv(channel)
        log.info(f"CSV name before upgrade is: {csv_name_pre_upgrade}")
        csv_pre_upgrade = CSV(
            resource_name=csv_name_pre_upgrade,
            namespace=namespace,
        )
        pre_upgrade_images = get_images(csv_pre_upgrade.get())
        version_change = parsed_upgrade_version > parsed_version_before_upgrade
        if version_change:
            version_config_file = os.path.join(
                constants.CONF_DIR, 'ocs_version', f'ocs-{upgrade_version}.yaml'
            )
            load_config_file(version_config_file)
        ocs_catalog = CatalogSource(
            resource_name=constants.OPERATOR_CATALOG_SOURCE_NAME,
            namespace=constants.MARKETPLACE_NAMESPACE,
        )
        upgrade_in_current_source = config.UPGRADE.get(
            'upgrade_in_current_source', False
        )
        if not upgrade_in_current_source:
            if not ocs_catalog.is_exist() and not upgrade_in_current_source:
                log.info("OCS catalog source doesn't exist. Creating new one.")
                create_catalog_source(ocs_registry_image, ignore_upgrade=True)
            image_url = ocs_catalog.get_image_url()
            image_tag = ocs_catalog.get_image_name()
            log.info(f"Current image is: {image_url}, tag: {image_tag}")
            if ocs_registry_image:
                image_url, new_image_tag = ocs_registry_image.split(':')
            elif config.UPGRADE.get('upgrade_to_latest', True) or version_change:
                new_image_tag = get_latest_ds_olm_tag()
            else:
                new_image_tag = get_next_version_available_for_upgrade(image_tag)
            cs_data = deepcopy(ocs_catalog.data)
            image_for_upgrade = ':'.join([image_url, new_image_tag])
            log.info(f"Image: {image_for_upgrade} will be used for upgrade.")
            cs_data['spec']['image'] = image_for_upgrade
            with NamedTemporaryFile() as cs_yaml:
                dump_data_to_temp_yaml(cs_data, cs_yaml.name)
                ocs_catalog.apply(cs_yaml.name)
        # Wait for the new package manifest for upgrade.
        operator_selector = get_selector_for_ocs_operator()
        package_manifest = PackageManifest(
            resource_name=OCS_OPERATOR_NAME,
            selector=operator_selector,
        )
        package_manifest.wait_for_resource()
        channel = config.DEPLOYMENT.get('ocs_csv_channel')
        if not channel:
            channel = package_manifest.get_default_channel()

        # update subscription
        subscription = OCP(
            resource_name=constants.OCS_SUBSCRIPTION,
            kind='subscription',
            namespace=config.ENV_DATA['cluster_namespace'],
        )
        current_ocs_source = subscription.data['spec']['source']
        log.info(f"Current OCS subscription source: {current_ocs_source}")
        ocs_source = (
            current_ocs_source
            if upgrade_in_current_source
            else constants.OPERATOR_CATALOG_SOURCE_NAME
        )
        patch_subscription_cmd = (
            f'oc patch subscription {constants.OCS_SUBSCRIPTION} '
            f'-n {namespace} --type merge -p \'{{"spec":{{"channel": '
            f'"{channel}", "source": "{ocs_source}"}}}}\''
        )
        run_cmd(patch_subscription_cmd)

        subscription_plan_approval = config.DEPLOYMENT.get(
            'subscription_plan_approval'
        )
        if subscription_plan_approval == 'Manual':
            wait_for_install_plan_and_approve(namespace)
        attempts = 145
        for attempt in range(1, attempts + 1):
            log.info(f"Attempt {attempt}/{attempts} to check CSV upgraded.")
            csv_name_post_upgrade = package_manifest.get_current_csv(channel)
            if csv_name_post_upgrade == csv_name_pre_upgrade:
                log.info(f"CSV is still: {csv_name_post_upgrade}")
                sleep(5)
            else:
                log.info(f"CSV now upgraded to: {csv_name_post_upgrade}")
                break
            if attempts == attempt:
                raise TimeoutException("No new CSV found after upgrade!")
        csv_post_upgrade = CSV(
            resource_name=csv_name_post_upgrade,
            namespace=namespace,
        )
        log.info(
            f"Waiting for CSV {csv_name_post_upgrade} to be in succeeded state"
        )
        if version_before_upgrade == '4.2' and upgrade_version == '4.3':
            log.info("Force creating Ceph toolbox after upgrade 4.2 -> 4.3")
            setup_ceph_toolbox(force_setup=True)
        osd_count = get_osd_count()
        csv_post_upgrade.wait_for_phase("Succeeded", timeout=200 * osd_count)
        post_upgrade_images = get_images(csv_post_upgrade.get())
        old_images, _, _ = get_upgrade_image_info(
            pre_upgrade_images, post_upgrade_images
        )
        verify_image_versions(old_images, parsed_upgrade_version)
        ocs_install_verification(
            timeout=600,
            skip_osd_distribution_check=True,
            ocs_registry_image=ocs_registry_image,
            post_upgrade_verification=True,
        )
def create_catalog_source(image=None, ignore_upgrade=False):
    """
    This prepares the catalog source manifest for deploying the OCS operator
    from the quay registry.

    Args:
        image (str): Image of ocs registry.
        ignore_upgrade (bool): Ignore upgrade parameter.

    """
    # Because the custom catalog source will be called redhat-operators, we need
    # to disable the default sources. This should not be an issue as OCS internal
    # registry images are now based on the OCP registry image
    disable_specific_source(constants.OPERATOR_CATALOG_SOURCE_NAME)
    logger.info("Adding CatalogSource")
    if not image:
        image = config.DEPLOYMENT.get("ocs_registry_image", "")
    if config.DEPLOYMENT.get("stage_rh_osbs"):
        image = config.DEPLOYMENT.get("stage_index_image", constants.OSBS_BOUNDLE_IMAGE)
        ocp_version = version.get_semantic_ocp_version_from_config()
        osbs_image_tag = config.DEPLOYMENT.get(
            "stage_index_image_tag", f"v{ocp_version}"
        )
        image += f":{osbs_image_tag}"
        run_cmd(
            "oc patch image.config.openshift.io/cluster --type merge -p '"
            '{"spec": {"registrySources": {"insecureRegistries": '
            '["registry-proxy.engineering.redhat.com", "registry.stage.redhat.io"]'
            "}}}'"
        )
        run_cmd(f"oc apply -f {constants.STAGE_IMAGE_CONTENT_SOURCE_POLICY_YAML}")
        logger.info("Sleeping for 60 sec to start update machineconfigpool status")
        time.sleep(60)
        wait_for_machineconfigpool_status("all", timeout=1800)
    if not ignore_upgrade:
        upgrade = config.UPGRADE.get("upgrade", False)
    else:
        upgrade = False
    image_and_tag = image.rsplit(":", 1)
    image = image_and_tag[0]
    image_tag = image_and_tag[1] if len(image_and_tag) == 2 else None
    if not image_tag and config.REPORTING.get("us_ds") == "DS":
        image_tag = get_latest_ds_olm_tag(
            upgrade, latest_tag=config.DEPLOYMENT.get("default_latest_tag", "latest")
        )
    catalog_source_data = templating.load_yaml(constants.CATALOG_SOURCE_YAML)
    if config.ENV_DATA["platform"] == constants.IBMCLOUD_PLATFORM:
        create_ocs_secret(constants.MARKETPLACE_NAMESPACE)
        catalog_source_data["spec"]["secrets"] = [constants.OCS_SECRET]
    cs_name = constants.OPERATOR_CATALOG_SOURCE_NAME
    change_cs_condition = (
        (image or image_tag)
        and catalog_source_data["kind"] == "CatalogSource"
        and catalog_source_data["metadata"]["name"] == cs_name
    )
    if change_cs_condition:
        default_image = config.DEPLOYMENT["default_ocs_registry_image"]
        image = image if image else default_image.rsplit(":", 1)[0]
        catalog_source_data["spec"][
            "image"
        ] = f"{image}:{image_tag if image_tag else 'latest'}"
    catalog_source_manifest = tempfile.NamedTemporaryFile(
        mode="w+", prefix="catalog_source_manifest", delete=False
    )
    templating.dump_data_to_temp_yaml(
        catalog_source_data, catalog_source_manifest.name
    )
    run_cmd(f"oc apply -f {catalog_source_manifest.name}", timeout=2400)
    catalog_source = CatalogSource(
        resource_name=constants.OPERATOR_CATALOG_SOURCE_NAME,
        namespace=constants.MARKETPLACE_NAMESPACE,
    )
    # Wait for the catalog source to be ready
    catalog_source.wait_for_state("READY")