def validate_external_vault(self):
    """
    Verify the external Vault configuration after OCS deployment.

    The following checks are performed:
    1. check osd encryption keys in the vault path
    2. check noobaa keys in the vault path
    3. check storagecluster CR for 'kms' enabled

    Raises:
        NotFoundError: if a key is not found in vault or in the resource CR

    """
    self.gather_init_vault_conf()
    self.update_vault_env_vars()
    if config.ENV_DATA.get("use_vault_namespace"):
        self.get_vault_namespace()
        os.environ["VAULT_NAMESPACE"] = self.vault_namespace
    self.get_vault_backend_path()
    kvlist = vault_kv_list(self.vault_backend_path)

    # Check that OSD encryption keys are present
    osds = pod.get_osd_pods()
    for osd in osds:
        pvc = (
            osd.get()
            .get("metadata")
            .get("labels")
            .get(constants.CEPH_ROOK_IO_PVC_LABEL)
        )
        if any(pvc in k for k in kvlist):
            logger.info(f"Vault: Found key for {pvc}")
        else:
            logger.error(f"Vault: Key not found for {pvc}")
            raise NotFoundError("Vault key not found")

    # Check for the NooBaa key
    if any(constants.VAULT_NOOBAA_ROOT_SECRET_PATH in k for k in kvlist):
        logger.info("Found Noobaa root secret path")
    else:
        logger.error("Noobaa root secret path not found")
        raise NotFoundError("Vault key for noobaa not found")

    # Check that KMS is enabled on the storage cluster
    if not is_kms_enabled():
        logger.error("KMS not enabled on storage cluster")
        raise NotFoundError("KMS flag not found")

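# Usage sketch for validate_external_vault(). The method needs an initialized
# Vault KMS object; `kms.get_kms_deployment()` below is the helper ocs-ci
# commonly uses to obtain it, but treat the exact entry point as an assumption
# and adapt it to how the KMS object is created in your setup.
def _example_validate_external_vault():
    from ocs_ci.utility import kms

    kms_obj = kms.get_kms_deployment()
    # Raises NotFoundError if any OSD or NooBaa key is missing in Vault,
    # or if the storagecluster CR does not have KMS enabled.
    kms_obj.validate_external_vault()
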
def get_opm_tool():
    """
    Download and install the opm tool, if it is not already available.
    """
    try:
        opm_version = exec_cmd("opm version")
    except (CommandFailed, FileNotFoundError):
        logger.info("opm tool is not available, installing it")
        opm_release_tag = config.ENV_DATA.get("opm_release_tag", "latest")
        if opm_release_tag != "latest":
            opm_release_tag = f"tags/{opm_release_tag}"
        opm_releases_api_url = (
            f"https://api.github.com/repos/{config.ENV_DATA.get('opm_owner_repo')}/"
            f"releases/{opm_release_tag}"
        )
        if config.AUTH.get("github"):
            github_auth = (
                config.AUTH["github"].get("username"),
                config.AUTH["github"].get("token"),
            )
            logger.debug(f"Using github authentication (user: {github_auth[0]})")
        else:
            github_auth = None
            logger.warning(
                "Github credentials are not provided in data/auth.yaml file. "
                "You might encounter issues with accessing the github api as it "
                "has a very strict rate limit for unauthenticated requests "
                "(60 requests per hour). Please check the docs/getting_started.md "
                "file to find out how to configure github authentication."
            )
        release_data = json.loads(
            get_url_content(opm_releases_api_url, auth=github_auth)
        )

        if platform.system() == "Darwin":
            opm_asset_name = "darwin-amd64-opm"
        elif platform.system() == "Linux":
            opm_asset_name = "linux-amd64-opm"
        else:
            raise UnsupportedOSType

        for asset in release_data["assets"]:
            if asset["name"] == opm_asset_name:
                opm_download_url = asset["browser_download_url"]
                break
        else:
            raise NotFoundError(
                f"opm binary for selected type {opm_asset_name} was not found"
            )

        prepare_bin_dir()
        bin_dir = os.path.expanduser(config.RUN["bin_dir"])
        logger.info(
            f"Downloading opm tool from '{opm_download_url}' to '{bin_dir}'"
        )
        download_file(opm_download_url, os.path.join(bin_dir, "opm"))
        cmd = f"chmod +x {os.path.join(bin_dir, 'opm')}"
        exec_cmd(cmd)
        opm_version = exec_cmd("opm version")

    logger.info(f"opm tool is available: {opm_version.stdout.decode('utf-8')}")

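# Usage sketch for get_opm_tool(). The function is effectively idempotent
# (it only downloads when `opm` is not already usable), so it can be called
# right before any `opm ...` invocation. The config values below are
# illustrative assumptions; `opm_owner_repo` would typically point at the
# GitHub repo that publishes opm release binaries
# (operator-framework/operator-registry).
def _example_get_opm_tool():
    config.ENV_DATA.setdefault(
        "opm_owner_repo", "operator-framework/operator-registry"
    )
    config.ENV_DATA.setdefault("opm_release_tag", "latest")
    get_opm_tool()
    # opm should now be available in the configured bin_dir
    exec_cmd("opm version")
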
def link_spec_volume(spec_dict, volume_name, pvc_name):
    """
    Find volume of given name in given spec dict, and set given pvc name
    as a pvc for the volume in the spec.

    Args:
        spec_dict (dict): dictionary with a container/template spec
        volume_name (str): name of the volume in the spec dict to link
        pvc_name (str): name of the target pvc (for the given volume)

    Raises:
        NotFoundError: when the given volume is not found in the given spec

    """
    is_pvc_linked = False
    for vol in spec_dict["volumes"]:
        if vol["name"] == volume_name:
            vol["persistentVolumeClaim"]["claimName"] = pvc_name
            is_pvc_linked = True
            break
    if not is_pvc_linked:
        raise NotFoundError(f"volume {volume_name} not found in given spec")

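# Usage sketch for link_spec_volume(), using a minimal hand-made pod template
# spec. The volume and PVC names are illustrative only.
def _example_link_spec_volume():
    spec = {
        "volumes": [
            {"name": "workload-data", "persistentVolumeClaim": {"claimName": ""}},
        ]
    }
    link_spec_volume(spec, "workload-data", "fio-target-pvc")
    assert spec["volumes"][0]["persistentVolumeClaim"]["claimName"] == "fio-target-pvc"
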
def prepare_disconnected_ocs_deployment(upgrade=False):
    """
    Prepare disconnected ocs deployment:
    - get related images from OCS operator bundle csv
    - mirror related images to mirror registry
    - create imageContentSourcePolicy for the mirrored images
    - disable the default OperatorSources

    Args:
        upgrade (bool): True if this is an upgrade run, False for a fresh
            installation (default: False)

    Returns:
        str: mirrored OCS registry image prepared for disconnected installation
            or None (for live deployment)

    """
    logger.info(
        f"Prepare for disconnected OCS {'upgrade' if upgrade else 'installation'}"
    )
    if config.DEPLOYMENT.get("live_deployment"):
        get_opm_tool()

        pull_secret_path = os.path.join(constants.TOP_DIR, "data", "pull-secret")
        ocp_version = get_ocp_version()
        index_image = f"{config.DEPLOYMENT['cs_redhat_operators_image']}:v{ocp_version}"
        mirrored_index_image = (
            f"{config.DEPLOYMENT['mirror_registry']}/{constants.MIRRORED_INDEX_IMAGE_NAMESPACE}/"
            f"{constants.MIRRORED_INDEX_IMAGE_NAME}:v{ocp_version}"
        )
        # prune an index image
        logger.info(
            f"Prune index image {index_image} -> {mirrored_index_image} "
            f"(packages: {', '.join(constants.DISCON_CL_REQUIRED_PACKAGES)})"
        )
        cmd = (
            f"opm index prune -f {index_image} "
            f"-p {','.join(constants.DISCON_CL_REQUIRED_PACKAGES)} "
            f"-t {mirrored_index_image}"
        )
        # opm tool doesn't have --authfile parameter, we have to supply auth
        # file through env variable
        os.environ["REGISTRY_AUTH_FILE"] = pull_secret_path
        exec_cmd(cmd)

        # login to mirror registry
        login_to_mirror_registry(pull_secret_path)

        # push pruned index image to mirror registry
        logger.info(
            f"Push pruned index image to mirror registry: {mirrored_index_image}"
        )
        cmd = f"podman push --authfile {pull_secret_path} --tls-verify=false {mirrored_index_image}"
        exec_cmd(cmd)

        # mirror related images (this might take a very long time)
        logger.info(f"Mirror images related to index image: {mirrored_index_image}")
        cmd = (
            f"oc adm catalog mirror {mirrored_index_image} -a {pull_secret_path} --insecure "
            f"{config.DEPLOYMENT['mirror_registry']} --index-filter-by-os='.*'"
        )
        oc_acm_result = exec_cmd(cmd, timeout=7200)

        for line in oc_acm_result.stdout.decode("utf-8").splitlines():
            if "wrote mirroring manifests to" in line:
                break
        else:
            raise NotFoundError(
                "Manifests directory not printed to stdout of "
                "'oc adm catalog mirror ...' command."
            )
        mirroring_manifests_dir = line.replace("wrote mirroring manifests to ", "")
        logger.debug(f"Mirrored manifests directory: {mirroring_manifests_dir}")

        # create ImageContentSourcePolicy
        icsp_file = os.path.join(
            f"{mirroring_manifests_dir}",
            "imageContentSourcePolicy.yaml",
        )
        exec_cmd(f"oc apply -f {icsp_file}")

        # Disable the default OperatorSources
        exec_cmd(
            """oc patch OperatorHub cluster --type json """
            """-p '[{"op": "add", "path": "/spec/disableAllDefaultSources", "value": true}]'"""
        )

        # create redhat-operators CatalogSource
        catalog_source_data = templating.load_yaml(constants.CATALOG_SOURCE_YAML)
        catalog_source_manifest = tempfile.NamedTemporaryFile(
            mode="w+", prefix="catalog_source_manifest", delete=False
        )
        catalog_source_data["spec"]["image"] = f"{mirrored_index_image}"
        catalog_source_data["metadata"]["name"] = "redhat-operators"
        catalog_source_data["spec"]["displayName"] = "Red Hat Operators - Mirrored"
        # remove ocs-operator-internal label
        catalog_source_data["metadata"]["labels"].pop("ocs-operator-internal", None)
        templating.dump_data_to_temp_yaml(
            catalog_source_data, catalog_source_manifest.name
        )
        exec_cmd(f"oc apply -f {catalog_source_manifest.name}")
        catalog_source = CatalogSource(
            resource_name="redhat-operators",
            namespace=constants.MARKETPLACE_NAMESPACE,
        )
        # wait for the catalog source to be ready
        catalog_source.wait_for_state("READY")

        return

    if config.DEPLOYMENT.get("stage_rh_osbs"):
        raise NotImplementedError(
            "Disconnected installation from stage is not implemented!"
        )

    if upgrade:
        ocs_registry_image = config.UPGRADE.get("upgrade_ocs_registry_image", "")
    else:
        ocs_registry_image = config.DEPLOYMENT.get("ocs_registry_image", "")
    logger.debug(f"ocs-registry-image: {ocs_registry_image}")
    ocs_registry_image_and_tag = ocs_registry_image.rsplit(":", 1)
    image_tag = (
        ocs_registry_image_and_tag[1]
        if len(ocs_registry_image_and_tag) == 2
        else None
    )
    if not image_tag and config.REPORTING.get("us_ds") == "DS":
        image_tag = get_latest_ds_olm_tag(
            upgrade=False if upgrade else config.UPGRADE.get("upgrade", False),
            latest_tag=config.DEPLOYMENT.get("default_latest_tag", "latest"),
        )
        ocs_registry_image = (
            f"{config.DEPLOYMENT['default_ocs_registry_image'].split(':')[0]}:{image_tag}"
        )
    bundle_image = f"{constants.OCS_OPERATOR_BUNDLE_IMAGE}:{image_tag}"
    logger.debug(f"ocs-operator-bundle image: {bundle_image}")

    csv_yaml = get_csv_from_image(bundle_image)
    ocs_operator_image = (
        csv_yaml.get("spec", {})
        .get("install", {})
        .get("spec", {})
        .get("deployments", [{}])[0]
        .get("spec", {})
        .get("template", {})
        .get("spec", {})
        .get("containers", [{}])[0]
        .get("image")
    )
    logger.debug(f"ocs-operator-image: {ocs_operator_image}")

    # prepare list of related images (bundle, registry and operator images and
    # all images from the relatedImages section of the csv)
    ocs_related_images = []
    ocs_related_images.append(get_image_with_digest(bundle_image))
    ocs_registry_image_with_digest = get_image_with_digest(ocs_registry_image)
    ocs_related_images.append(ocs_registry_image_with_digest)
    ocs_related_images.append(get_image_with_digest(ocs_operator_image))
    ocs_related_images += [
        image["image"] for image in csv_yaml.get("spec").get("relatedImages")
    ]
    logger.debug(f"OCS Related Images: {ocs_related_images}")

    mirror_registry = config.DEPLOYMENT["mirror_registry"]
    # prepare images mapping file for mirroring
    mapping_file_content = [
        f"{image}={mirror_registry}{image[image.index('/'):image.index('@')]}\n"
        for image in ocs_related_images
    ]
    logger.debug(f"Mapping file content: {mapping_file_content}")
    name = "ocs-images"
    mapping_file = os.path.join(
        config.ENV_DATA["cluster_path"], f"{name}-mapping.txt"
    )
    # write mapping file to disk
    with open(mapping_file, "w") as f:
        f.writelines(mapping_file_content)

    # prepare ImageContentSourcePolicy for OCS images
    with open(constants.TEMPLATE_IMAGE_CONTENT_SOURCE_POLICY_YAML) as f:
        ocs_icsp = yaml.safe_load(f)
    ocs_icsp["metadata"]["name"] = name
    ocs_icsp["spec"]["repositoryDigestMirrors"] = []
    for image in ocs_related_images:
        ocs_icsp["spec"]["repositoryDigestMirrors"].append(
            {
                "mirrors": [
                    f"{mirror_registry}{image[image.index('/'):image.index('@')]}"
                ],
                "source": image[:image.index("@")],
            }
        )
    logger.debug(f"OCS imageContentSourcePolicy: {yaml.safe_dump(ocs_icsp)}")
    ocs_icsp_file = os.path.join(
        config.ENV_DATA["cluster_path"], f"{name}-imageContentSourcePolicy.yaml"
    )
    with open(ocs_icsp_file, "w+") as fs:
        yaml.safe_dump(ocs_icsp, fs)

    # create ImageContentSourcePolicy
    exec_cmd(f"oc apply -f {ocs_icsp_file}")

    # mirror images based on mapping file
    with prepare_customized_pull_secret(ocs_related_images) as authfile_fo:
        login_to_mirror_registry(authfile_fo.name)

        exec_cmd(
            f"oc image mirror --filter-by-os='.*' -f {mapping_file} --insecure "
            f"--registry-config={authfile_fo.name} --max-per-registry=2",
            timeout=3600,
        )

        # mirror also the OCS registry image with the original version tag
        # (it will be used for creating the CatalogSource)
        mirrored_ocs_registry_image = (
            f"{mirror_registry}{ocs_registry_image[ocs_registry_image.index('/'):]}"
        )
        exec_cmd(
            f"podman push --tls-verify=false --authfile {authfile_fo.name} "
            f"{ocs_registry_image} {mirrored_ocs_registry_image}"
        )

    # Disable the default OperatorSources
    exec_cmd(
        """oc patch OperatorHub cluster --type json """
        """-p '[{"op": "add", "path": "/spec/disableAllDefaultSources", "value": true}]'"""
    )

    # wait until the newly created imageContentSourcePolicy is applied on all nodes
    wait_for_machineconfigpool_status("all")

    return mirrored_ocs_registry_image

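# Usage sketch for prepare_disconnected_ocs_deployment(). Gating the call on
# config.DEPLOYMENT["disconnected"] and feeding the returned image back into
# config.DEPLOYMENT["ocs_registry_image"] mirrors how a deployment flow would
# typically wire this up, but both details are assumptions here, not
# requirements of the function itself.
def _example_prepare_disconnected_ocs_deployment(upgrade=False):
    if config.DEPLOYMENT.get("disconnected"):
        mirrored_image = prepare_disconnected_ocs_deployment(upgrade=upgrade)
        # None is returned for live deployment; otherwise the mirrored OCS
        # registry image should be used for the subsequent OCS CatalogSource.
        if mirrored_image:
            config.DEPLOYMENT["ocs_registry_image"] = mirrored_image
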
def prune_and_mirror_index_image(
    index_image, mirrored_index_image, packages, icsp=None
):
    """
    Prune given index image and push it to mirror registry, mirror all related
    images to mirror registry and create relevant imageContentSourcePolicy.

    Args:
        index_image (str): index image which will be pruned and mirrored
        mirrored_index_image (str): mirrored index image which will be pushed to
            mirror registry
        packages (list): list of packages to keep
        icsp (dict): ImageContentSourcePolicy used for mirroring (workaround for
            stage images, which point to a different registry than the one they
            are actually served from)

    Returns:
        str: path to generated catalogSource.yaml file

    """
    get_opm_tool()
    pull_secret_path = os.path.join(constants.TOP_DIR, "data", "pull-secret")

    # prune an index image
    logger.info(
        f"Prune index image {index_image} -> {mirrored_index_image} "
        f"(packages: {', '.join(packages)})"
    )
    cmd = (
        f"opm index prune -f {index_image} "
        f"-p {','.join(packages)} "
        f"-t {mirrored_index_image}"
    )
    if config.DEPLOYMENT.get("opm_index_prune_binary_image"):
        cmd += (
            f" --binary-image {config.DEPLOYMENT.get('opm_index_prune_binary_image')}"
        )
    # opm tool doesn't have --authfile parameter, we have to supply auth
    # file through env variable
    os.environ["REGISTRY_AUTH_FILE"] = pull_secret_path
    exec_cmd(cmd)

    # login to mirror registry
    login_to_mirror_registry(pull_secret_path)

    # push pruned index image to mirror registry
    logger.info(f"Push pruned index image to mirror registry: {mirrored_index_image}")
    cmd = f"podman push --authfile {pull_secret_path} --tls-verify=false {mirrored_index_image}"
    exec_cmd(cmd)

    # mirror related images (this might take a very long time)
    logger.info(f"Mirror images related to index image: {mirrored_index_image}")
    cmd = (
        f"oc adm catalog mirror {mirrored_index_image} -a {pull_secret_path} --insecure "
        f"{config.DEPLOYMENT['mirror_registry']} --index-filter-by-os='.*' --max-per-registry=2"
    )
    oc_acm_result = exec_cmd(cmd, timeout=7200)

    for line in oc_acm_result.stdout.decode("utf-8").splitlines():
        if "wrote mirroring manifests to" in line:
            break
    else:
        raise NotFoundError(
            "Manifests directory not printed to stdout of "
            "'oc adm catalog mirror ...' command."
        )
    mirroring_manifests_dir = line.replace("wrote mirroring manifests to ", "")
    logger.debug(f"Mirrored manifests directory: {mirroring_manifests_dir}")

    if icsp:
        # update mapping.txt file with urls updated based on the provided
        # imageContentSourcePolicy
        mapping_file = os.path.join(
            f"{mirroring_manifests_dir}",
            "mapping.txt",
        )
        with open(mapping_file) as mf:
            mapping_file_content = []
            for line in mf:
                # exclude mirrored_index_image
                if mirrored_index_image in line:
                    continue
                # apply any matching policy to all lines from mapping file
                for policy in icsp["spec"]["repositoryDigestMirrors"]:
                    # we use only the first defined mirror for a particular
                    # source, because we don't use any ICSP with more mirrors
                    # for one source and supporting that would make the logic
                    # very complex and confusing
                    line = line.replace(policy["source"], policy["mirrors"][0])
                mapping_file_content.append(line)

        # write the updated mapping file to disk
        mapping_file_updated = os.path.join(
            f"{mirroring_manifests_dir}",
            "mapping_updated.txt",
        )
        with open(mapping_file_updated, "w") as f:
            f.writelines(mapping_file_content)

        # mirror images based on the updated mapping file
        # ignore errors, because some of the images might be already mirrored
        # via the `oc adm catalog mirror ...` command and not available on the
        # mirror
        exec_cmd(
            f"oc image mirror --filter-by-os='.*' -f {mapping_file_updated} "
            f"--insecure --registry-config={pull_secret_path} "
            "--max-per-registry=2 --continue-on-error=true --skip-missing=true",
            timeout=3600,
            ignore_error=True,
        )

    # create ImageContentSourcePolicy
    icsp_file = os.path.join(
        f"{mirroring_manifests_dir}",
        "imageContentSourcePolicy.yaml",
    )
    exec_cmd(f"oc apply -f {icsp_file}")
    logger.info("Sleeping for 60 sec to let the machineconfigpool status start updating")
    time.sleep(60)
    wait_for_machineconfigpool_status("all")

    cs_file = os.path.join(
        f"{mirroring_manifests_dir}",
        "catalogSource.yaml",
    )
    return cs_file

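# Usage sketch for prune_and_mirror_index_image(). The index image reference,
# mirror namespace, and package names below are illustrative assumptions; the
# returned catalogSource.yaml (generated by `oc adm catalog mirror`) can be
# adjusted (e.g. metadata.name, displayName) before being applied.
def _example_prune_and_mirror_index_image():
    ocp_version = get_ocp_version()
    mirrored_index_image = (
        f"{config.DEPLOYMENT['mirror_registry']}/olm-mirror/"
        f"redhat-operator-index:v{ocp_version}"
    )
    cs_file = prune_and_mirror_index_image(
        index_image=f"registry.redhat.io/redhat/redhat-operator-index:v{ocp_version}",
        mirrored_index_image=mirrored_index_image,
        packages=["ocs-operator", "mcg-operator"],
    )
    exec_cmd(f"oc apply -f {cs_file}")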