Example #1
def increase_pods_per_worker_node_count(pods_per_node=500, pods_per_core=10):
    """
    Function to increase pods per node count, default OCP supports 250 pods per node,
    from OCP 4.6 limit is going to be 500, but using this function can override this param
    to create more pods per worker nodes.
    more detail: https://docs.openshift.com/container-platform/4.5/nodes/nodes/nodes-nodes-managing-max-pods.html

    Example: The default value for podsPerCore is 10 and the default value for maxPods is 250.
    This means that unless the node has 25 cores or more, by default, podsPerCore will be the limiting factor.

    WARN: This function will perform Unscheduling of workers and reboot so
    Please aware if there is any non-dc pods then expected to be terminated.

    Args:
        pods_per_node (int): Pods per node limit count
        pods_per_core (int): Pods per core limit count

    Raise:
        UnexpectedBehaviour if machineconfigpool not in Updating state within 40secs.

    """
    max_pods_template = templating.load_yaml(
        constants.PODS_PER_NODE_COUNT_YAML)
    max_pods_template["spec"]["kubeletConfig"]["podsPerCore"] = pods_per_core
    max_pods_template["spec"]["kubeletConfig"]["maxPods"] = pods_per_node

    # Create the new max-pods KubeletConfig resource
    max_pods_obj = OCS(**max_pods_template)
    assert max_pods_obj.create()

    # Apply the changes to the workers by labeling their machineconfigpool
    label_cmd = "label machineconfigpool worker custom-kubelet=small-pods"
    ocp = OCP()
    assert ocp.exec_oc_cmd(command=label_cmd)

    # First wait for the Updating status to become True; by default it is
    # False and machine_count equals ready_machine_count
    get_cmd = "get machineconfigpools -o yaml"
    timeout_counter = 0
    while True:
        output = ocp.exec_oc_cmd(command=get_cmd)
        update_status = (output.get("items")[1].get("status").get("conditions")
                         [4].get("status"))
        if update_status == "True":
            break
        elif timeout_counter >= 8:
            raise UnexpectedBehaviour(
                "After 40 seconds machineconfigpool still not in Updating state")
        else:
            logging.info("Sleeping 5 seconds for the Updating status to change")
            timeout_counter += 1
            time.sleep(5)

    # Validate whether the change was successful
    output = ocp.exec_oc_cmd(command=get_cmd)
    machine_count = output.get("items")[1].get("status").get("machineCount")
    # During manual execution observed each node took 240+ sec for update
    timeout = machine_count * 300
    utils.wait_for_machineconfigpool_status(node_type=constants.WORKER_MACHINE,
                                            timeout=timeout)
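
For reference, the template behind constants.PODS_PER_NODE_COUNT_YAML presumably resembles the KubeletConfig example from the OpenShift docs linked above; a minimal sketch as a Python dict (the metadata name and exact field values are assumptions, not the actual template contents):

# Illustrative shape only -- the machineConfigPoolSelector label must match
# the custom-kubelet=small-pods label applied by label_cmd above.
max_pods_template = {
    "apiVersion": "machineconfiguration.openshift.io/v1",
    "kind": "KubeletConfig",
    "metadata": {"name": "set-max-pods"},  # hypothetical name
    "spec": {
        "machineConfigPoolSelector": {
            "matchLabels": {"custom-kubelet": "small-pods"},
        },
        "kubeletConfig": {"podsPerCore": 10, "maxPods": 500},
    },
}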
Example #2
    def flexy_post_processing(self):
        """
        Perform copying of flexy-dir to nfs mount
        and do this only if its jenkins run

        """
        abs_cluster_path = os.path.abspath(self.cluster_path)
        flexy_cluster_path = os.path.join(self.flexy_host_dir,
                                          "flexy/workdir/install-dir")
        if os.path.exists(abs_cluster_path):
            os.rmdir(abs_cluster_path)
        # Check whether this is a Jenkins run
        if self.is_jenkins_mount():
            flexy_nfs_path = os.path.join(
                constants.JENKINS_NFS_CURRENT_CLUSTER_DIR,
                constants.FLEXY_HOST_DIR)
            if not os.path.exists(flexy_nfs_path):
                shutil.copytree(
                    self.flexy_host_dir,
                    flexy_nfs_path,
                    symlinks=True,
                    ignore_dangling_symlinks=True,
                )
                chmod = f"chmod -R 777 {flexy_nfs_path}"
                run_cmd(chmod)
                logger.info(
                    f"Symlinking {abs_cluster_path} to {flexy_nfs_path}")
                os.symlink(
                    os.path.join(flexy_nfs_path,
                                 constants.FLEXY_RELATIVE_CLUSTER_DIR),
                    abs_cluster_path,
                )
        else:
            # recursively change permissions
            # for all the subdirs
            chmod = f"sudo chmod -R 777 {constants.FLEXY_HOST_DIR_PATH}"
            run_cmd(chmod)
            logger.info(f"Symlinking {flexy_cluster_path, abs_cluster_path}")
            os.symlink(flexy_cluster_path, abs_cluster_path)

        # Apply pull secrets on ocp cluster
        kubeconfig = os.path.join(self.cluster_path,
                                  config.RUN.get("kubeconfig_location"))
        secret_cmd = (
            f"oc set data secret/pull-secret "
            f"--kubeconfig {kubeconfig} "
            f"-n {constants.OPENSHIFT_CONFIG_NAMESPACE} "
            f"--from-file=.dockerconfigjson={constants.DATA_DIR}/pull-secret")
        run_cmd(secret_cmd)

        if not config.ENV_DATA.get("skip_ntp_configuration", False):
            ntp_cmd = (f"oc --kubeconfig {kubeconfig} "
                       f"create -f {constants.NTP_CHRONY_CONF}")
            logger.info("Creating NTP chrony")
            run_cmd(ntp_cmd)
        # sleep here to let the machineconfigpool status update start
        time.sleep(60)
        wait_for_machineconfigpool_status("all")
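
The fixed time.sleep(60) before wait_for_machineconfigpool_status is a race-avoidance heuristic. A hedged alternative is to poll until the pool actually reports Updating=True, which also avoids the positional conditions[4] indexing seen in Example #1; a minimal standalone sketch using the oc CLI (not the ocs-ci implementation):

import json
import subprocess
import time

def wait_for_mcp_updating(pool="worker", timeout=120, interval=5):
    """Poll until the given machineconfigpool reports Updating=True."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        out = subprocess.run(
            ["oc", "get", "machineconfigpool", pool, "-o", "json"],
            capture_output=True, check=True,
        ).stdout
        conditions = json.loads(out)["status"]["conditions"]
        # select the Updating condition by type instead of by list position
        updating = next((c for c in conditions if c["type"] == "Updating"), {})
        if updating.get("status") == "True":
            return
        time.sleep(interval)
    raise TimeoutError(f"{pool} did not report Updating=True within {timeout}s")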
Example #3
def create_optional_operators_catalogsource_non_ga(force=False):
    """
    Creating optional operators CatalogSource and ImageContentSourcePolicy
    for non-ga OCP.

    Args:
        force (bool): enable/disable lso catalog setup

    """
    ocp_version = version.get_semantic_ocp_version_from_config()
    ocp_ga_version = get_ocp_ga_version(ocp_version)
    if ocp_ga_version and not force:
        return
    optional_operators_data = list(
        templating.load_yaml(constants.LOCAL_STORAGE_OPTIONAL_OPERATORS,
                             multi_document=True))
    optional_operators_yaml = tempfile.NamedTemporaryFile(
        mode="w+", prefix="optional_operators", delete=False)
    if config.DEPLOYMENT.get("optional_operators_image"):
        for _dict in optional_operators_data:
            if _dict.get("kind").lower() == "catalogsource":
                _dict["spec"]["image"] = config.DEPLOYMENT.get(
                    "optional_operators_image")
    if config.DEPLOYMENT.get("disconnected"):
        # in case of disconnected environment, we have to mirror all the
        # optional_operators images
        icsp = None
        for _dict in optional_operators_data:
            if _dict.get("kind").lower() == "catalogsource":
                index_image = _dict["spec"]["image"]
            if _dict.get("kind").lower() == "imagecontentsourcepolicy":
                icsp = _dict
        mirrored_index_image = (f"{config.DEPLOYMENT['mirror_registry']}/"
                                f"{index_image.split('/', 1)[-1]}")
        prune_and_mirror_index_image(
            index_image,
            mirrored_index_image,
            constants.DISCON_CL_REQUIRED_PACKAGES,
            icsp,
        )
        _dict["spec"]["image"] = mirrored_index_image
    templating.dump_data_to_temp_yaml(optional_operators_data,
                                      optional_operators_yaml.name)
    with open(optional_operators_yaml.name, "r") as f:
        logger.info(f.read())
    logger.info(
        "Creating optional operators CatalogSource and ImageContentSourcePolicy"
    )
    run_cmd(f"oc create -f {optional_operators_yaml.name}")
    wait_for_machineconfigpool_status("all")
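
The mirrored_index_image expression keeps everything after the original registry host. A worked example (the image and registry names are hypothetical):

index_image = "registry.redhat.io/redhat/redhat-operator-index:v4.9"  # hypothetical
mirror_registry = "mirror-registry.example.com:5000"                  # hypothetical
# split('/', 1)[-1] drops the registry host and keeps the repository path
mirrored_index_image = f"{mirror_registry}/{index_image.split('/', 1)[-1]}"
# -> "mirror-registry.example.com:5000/redhat/redhat-operator-index:v4.9"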
Example #4
File: flexy.py  Project: ypersky1980/ocs-ci
    def flexy_post_processing(self):
        """
        Update global pull-secret and configure ntp (if required).
        """
        # Apply pull secrets on ocp cluster
        kubeconfig = os.path.join(self.cluster_path,
                                  config.RUN.get("kubeconfig_location"))
        secret_cmd = (
            f"oc set data secret/pull-secret "
            f"--kubeconfig {kubeconfig} "
            f"-n {constants.OPENSHIFT_CONFIG_NAMESPACE} "
            f"--from-file=.dockerconfigjson={constants.DATA_DIR}/pull-secret")
        run_cmd(secret_cmd)

        if not config.ENV_DATA.get("skip_ntp_configuration", False):
            ntp_cmd = (f"oc --kubeconfig {kubeconfig} "
                       f"create -f {constants.NTP_CHRONY_CONF}")
            logger.info("Creating NTP chrony")
            run_cmd(ntp_cmd)
        # sleep here to let the machineconfigpool status update start
        time.sleep(60)
        wait_for_machineconfigpool_status("all")
Example #5
    def flexy_post_processing(self):
        """
        Perform a few actions required after flexy execution:
        - update global pull-secret
        - login to mirror registry (disconnected cluster)
        - configure proxy server (disconnected cluster)
        - configure ntp (if required)
        """
        # Apply pull secrets on ocp cluster
        kubeconfig = os.path.join(self.cluster_path,
                                  config.RUN.get("kubeconfig_location"))
        # load cluster info
        load_cluster_info()

        # if on disconnected cluster, perform required tasks
        pull_secret_path = os.path.join(constants.DATA_DIR, "pull-secret")
        if config.DEPLOYMENT.get("disconnected"):
            # login to mirror registry
            login_to_mirror_registry(pull_secret_path)
            # configure additional allowed domains in proxy
            configure_allowed_domains_in_proxy()

        # update pull-secret
        secret_cmd = (f"oc set data secret/pull-secret "
                      f"--kubeconfig {kubeconfig} "
                      f"-n {constants.OPENSHIFT_CONFIG_NAMESPACE} "
                      f"--from-file=.dockerconfigjson={pull_secret_path}")
        exec_cmd(secret_cmd)

        if not config.ENV_DATA.get("skip_ntp_configuration", False):
            ntp_cmd = (f"oc --kubeconfig {kubeconfig} "
                       f"create -f {constants.NTP_CHRONY_CONF}")
            logger.info("Creating NTP chrony")
            exec_cmd(ntp_cmd)
        # sleep here to let the machineconfigpool status update start
        time.sleep(60)
        wait_for_machineconfigpool_status("all")
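
login_to_mirror_registry itself is not shown in these examples; given how the pull secret path is threaded through, it plausibly wraps a podman login call such as the following (an assumption about the helper, not its actual body):

import subprocess

def login_to_mirror_registry_sketch(pull_secret_path, mirror_registry):
    """Hypothetical sketch of the helper; not the ocs-ci implementation."""
    # podman reads the credentials for mirror_registry from the auth file
    subprocess.run(
        ["podman", "login", "--authfile", pull_secret_path,
         "--tls-verify=false", mirror_registry],
        check=True,
    )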
Example #6
def setup_local_storage(storageclass):
    """
    Setup the necessary resources for enabling local storage.

    Args:
        storageclass (string): storageClassName value to be used in
            LocalVolume CR based on LOCAL_VOLUME_YAML

    """
    # Get the worker nodes
    workers = get_nodes(node_type="worker")
    worker_names = [worker.name for worker in workers]
    logger.debug("Workers: %s", worker_names)

    ocp_version = version.get_semantic_ocp_version_from_config()
    ocs_version = version.get_semantic_ocs_version_from_config()
    ocp_ga_version = get_ocp_ga_version(ocp_version)
    if not ocp_ga_version:
        optional_operators_data = list(
            templating.load_yaml(constants.LOCAL_STORAGE_OPTIONAL_OPERATORS,
                                 multi_document=True))
        optional_operators_yaml = tempfile.NamedTemporaryFile(
            mode="w+", prefix="optional_operators", delete=False)
        if config.DEPLOYMENT.get("optional_operators_image"):
            for _dict in optional_operators_data:
                if _dict.get("kind").lower() == "catalogsource":
                    _dict["spec"]["image"] = config.DEPLOYMENT.get(
                        "optional_operators_image")
        if config.DEPLOYMENT.get("disconnected"):
            # in case of disconnected environment, we have to mirror all the
            # optional_operators images
            icsp = None
            for _dict in optional_operators_data:
                if _dict.get("kind").lower() == "catalogsource":
                    index_image = _dict["spec"]["image"]
                if _dict.get("kind").lower() == "imagecontentsourcepolicy":
                    icsp = _dict
            mirrored_index_image = (f"{config.DEPLOYMENT['mirror_registry']}/"
                                    f"{index_image.split('/', 1)[-1]}")
            prune_and_mirror_index_image(
                index_image,
                mirrored_index_image,
                constants.DISCON_CL_REQUIRED_PACKAGES,
                icsp,
            )
            _dict["spec"]["image"] = mirrored_index_image
        templating.dump_data_to_temp_yaml(optional_operators_data,
                                          optional_operators_yaml.name)
        with open(optional_operators_yaml.name, "r") as f:
            logger.info(f.read())
        logger.info(
            "Creating optional operators CatalogSource and ImageContentSourcePolicy"
        )
        run_cmd(f"oc create -f {optional_operators_yaml.name}")
        logger.info(
            "Sleeping for 60 sec to let the machineconfigpool status update start")
        # sleep here to let the machineconfigpool status update start
        time.sleep(60)
        wait_for_machineconfigpool_status("all")

    logger.info("Retrieving local-storage-operator data from yaml")
    lso_data = list(
        templating.load_yaml(constants.LOCAL_STORAGE_OPERATOR,
                             multi_document=True))

    # ensure namespace is correct
    lso_namespace = config.ENV_DATA["local_storage_namespace"]
    for data in lso_data:
        if data["kind"] == "Namespace":
            data["metadata"]["name"] = lso_namespace
        else:
            data["metadata"]["namespace"] = lso_namespace
        if data["kind"] == "OperatorGroup":
            data["spec"]["targetNamespaces"] = [lso_namespace]

    # Update local-storage-operator subscription data with channel
    for data in lso_data:
        if data["kind"] == "Subscription":
            data["spec"]["channel"] = get_lso_channel()
        if not ocp_ga_version:
            if data["kind"] == "Subscription":
                data["spec"]["source"] = "optional-operators"

    # Create temp yaml file and create local storage operator
    logger.info(
        "Creating temp yaml file with local-storage-operator data:\n %s",
        lso_data)
    lso_data_yaml = tempfile.NamedTemporaryFile(
        mode="w+", prefix="local_storage_operator", delete=False)
    templating.dump_data_to_temp_yaml(lso_data, lso_data_yaml.name)
    with open(lso_data_yaml.name, "r") as f:
        logger.info(f.read())
    logger.info("Creating local-storage-operator")
    run_cmd(f"oc create -f {lso_data_yaml.name}")

    local_storage_operator = ocp.OCP(kind=constants.POD,
                                     namespace=lso_namespace)
    assert local_storage_operator.wait_for_resource(
        condition=constants.STATUS_RUNNING,
        selector=constants.LOCAL_STORAGE_OPERATOR_LABEL,
        timeout=600,
    ), "Local storage operator did not reach running phase"

    # Add disks for vSphere/RHV platform
    platform = config.ENV_DATA.get("platform").lower()
    lso_type = config.DEPLOYMENT.get("type")

    if platform == constants.VSPHERE_PLATFORM:
        add_disk_for_vsphere_platform()

    if platform == constants.RHV_PLATFORM:
        add_disk_for_rhv_platform()

    if (ocp_version >= version.VERSION_4_6) and (ocs_version >=
                                                 version.VERSION_4_6):
        # Pull local volume discovery yaml data
        logger.info("Pulling LocalVolumeDiscovery CR data from yaml")
        lvd_data = templating.load_yaml(constants.LOCAL_VOLUME_DISCOVERY_YAML)
        # Set local-volume-discovery namespace
        lvd_data["metadata"]["namespace"] = lso_namespace

        worker_nodes = get_compute_node_names(no_replace=True)

        # Update local volume discovery data with Worker node Names
        logger.info(
            "Updating LocalVolumeDiscovery CR data with worker nodes Name: %s",
            worker_nodes,
        )
        lvd_data["spec"]["nodeSelector"]["nodeSelectorTerms"][0][
            "matchExpressions"][0]["values"] = worker_nodes
        lvd_data_yaml = tempfile.NamedTemporaryFile(
            mode="w+", prefix="local_volume_discovery", delete=False)
        templating.dump_data_to_temp_yaml(lvd_data, lvd_data_yaml.name)

        logger.info("Creating LocalVolumeDiscovery CR")
        run_cmd(f"oc create -f {lvd_data_yaml.name}")

        # Pull local volume set yaml data
        logger.info("Pulling LocalVolumeSet CR data from yaml")
        lvs_data = templating.load_yaml(constants.LOCAL_VOLUME_SET_YAML)

        # Since we don't have a datastore with SSD on our current VMware
        # machines, the LocalVolumeSet doesn't detect a NonRotational disk.
        # As a workaround we add "Rotational" to deviceMechanicalProperties
        # so HDD disks are detected
        if platform == constants.VSPHERE_PLATFORM or config.ENV_DATA.get(
                "local_storage_allow_rotational_disks"):
            logger.info("Adding Rotational for deviceMechanicalProperties spec"
                        " to detect HDD disk")
            lvs_data["spec"]["deviceInclusionSpec"][
                "deviceMechanicalProperties"].append("Rotational")

        # Update local volume set data with Worker node Names
        logger.info(
            "Updating LocalVolumeSet CR data with worker nodes Name: %s",
            worker_nodes)
        lvs_data["spec"]["nodeSelector"]["nodeSelectorTerms"][0][
            "matchExpressions"][0]["values"] = worker_nodes

        # Set storage class
        logger.info(
            "Updating LocalVolumeSet CR data with LSO storageclass: %s",
            storageclass)
        lvs_data["spec"]["storageClassName"] = storageclass

        # set volumeMode to Filesystem for MCG only deployment
        if config.ENV_DATA["mcg_only_deployment"]:
            lvs_data["spec"]["volumeMode"] = constants.VOLUME_MODE_FILESYSTEM

        lvs_data_yaml = tempfile.NamedTemporaryFile(mode="w+",
                                                    prefix="local_volume_set",
                                                    delete=False)
        templating.dump_data_to_temp_yaml(lvs_data, lvs_data_yaml.name)
        logger.info("Creating LocalVolumeSet CR")
        run_cmd(f"oc create -f {lvs_data_yaml.name}")
    else:
        # Retrieve NVME device path ID for each worker node
        device_paths = get_device_paths(worker_names)

        # Pull local volume yaml data
        logger.info("Pulling LocalVolume CR data from yaml")
        lv_data = templating.load_yaml(constants.LOCAL_VOLUME_YAML)

        # Set local-volume namespace
        lv_data["metadata"]["namespace"] = lso_namespace

        # Set storage class
        logger.info("Updating LocalVolume CR data with LSO storageclass: %s",
                    storageclass)
        for scd in lv_data["spec"]["storageClassDevices"]:
            scd["storageClassName"] = storageclass

        # Update local volume data with NVME IDs
        logger.info("Updating LocalVolume CR data with device paths: %s",
                    device_paths)
        lv_data["spec"]["storageClassDevices"][0]["devicePaths"] = device_paths

        # Create temp yaml file and create local volume
        lv_data_yaml = tempfile.NamedTemporaryFile(mode="w+",
                                                   prefix="local_volume",
                                                   delete=False)
        templating.dump_data_to_temp_yaml(lv_data, lv_data_yaml.name)
        logger.info("Creating LocalVolume CR")
        run_cmd(f"oc create -f {lv_data_yaml.name}")
    logger.info("Waiting 30 seconds for PVs to create")
    storage_class_device_count = 1
    if platform == constants.AWS_PLATFORM and not lso_type == constants.AWS_EBS:
        storage_class_device_count = 2
    elif platform == constants.IBM_POWER_PLATFORM:
        numberofstoragedisks = config.ENV_DATA.get("number_of_storage_disks",
                                                   1)
        storage_class_device_count = numberofstoragedisks
    elif platform == constants.VSPHERE_PLATFORM:
        # extra_disks is used in vSphere attach_disk() method
        storage_class_device_count = config.ENV_DATA.get("extra_disks", 1)
    expected_pvs = len(worker_names) * storage_class_device_count
    verify_pvs_created(expected_pvs, storageclass)
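
The chained ["nodeSelector"]["nodeSelectorTerms"][0]["matchExpressions"][0] lookups above assume a LocalVolumeSet CR shaped roughly like this (a sketch based on the local.storage.openshift.io API; the concrete values are illustrative, not the contents of LOCAL_VOLUME_SET_YAML):

lvs_data = {
    "apiVersion": "local.storage.openshift.io/v1alpha1",
    "kind": "LocalVolumeSet",
    "metadata": {"name": "local-block", "namespace": "openshift-local-storage"},
    "spec": {
        "storageClassName": "localblock",  # overwritten with `storageclass`
        "volumeMode": "Block",
        "deviceInclusionSpec": {
            "deviceMechanicalProperties": ["NonRotational"],
        },
        "nodeSelector": {
            "nodeSelectorTerms": [{
                "matchExpressions": [{
                    "key": "kubernetes.io/hostname",
                    "operator": "In",
                    "values": [],  # filled with the worker node names
                }],
            }],
        },
    },
}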
Example #7
def prepare_disconnected_ocs_deployment(upgrade=False):
    """
    Prepare disconnected ocs deployment:
    - get related images from OCS operator bundle csv
    - mirror related images to mirror registry
    - create imageContentSourcePolicy for the mirrored images
    - disable the default OperatorSources

    Args:
        upgrade (bool): True for an upgrade, False for a fresh installation
            (default: False)

    Returns:
        str: mirrored OCS registry image prepared for disconnected installation
            or None (for live deployment)

    """

    logger.info(
        f"Prepare for disconnected OCS {'upgrade' if upgrade else 'installation'}"
    )
    if config.DEPLOYMENT.get("live_deployment"):
        get_opm_tool()

        pull_secret_path = os.path.join(constants.TOP_DIR, "data",
                                        "pull-secret")
        ocp_version = get_ocp_version()
        index_image = f"{config.DEPLOYMENT['cs_redhat_operators_image']}:v{ocp_version}"
        mirrored_index_image = (
            f"{config.DEPLOYMENT['mirror_registry']}/{constants.MIRRORED_INDEX_IMAGE_NAMESPACE}/"
            f"{constants.MIRRORED_INDEX_IMAGE_NAME}:v{ocp_version}")
        # prune an index image
        logger.info(
            f"Prune index image {index_image} -> {mirrored_index_image} "
            f"(packages: {', '.join(constants.DISCON_CL_REQUIRED_PACKAGES)})")
        cmd = (f"opm index prune -f {index_image} "
               f"-p {','.join(constants.DISCON_CL_REQUIRED_PACKAGES)} "
               f"-t {mirrored_index_image}")
        # opm tool doesn't have --authfile parameter, we have to supply auth
        # file through env variable
        os.environ["REGISTRY_AUTH_FILE"] = pull_secret_path
        exec_cmd(cmd)

        # login to mirror registry
        login_to_mirror_registry(pull_secret_path)

        # push pruned index image to mirror registry
        logger.info(
            f"Push pruned index image to mirror registry: {mirrored_index_image}"
        )
        cmd = f"podman push --authfile {pull_secret_path} --tls-verify=false {mirrored_index_image}"
        exec_cmd(cmd)

        # mirror related images (this might take a very long time)
        logger.info(
            f"Mirror images related to index image: {mirrored_index_image}")
        cmd = (
            f"oc adm catalog mirror {mirrored_index_image} -a {pull_secret_path} --insecure "
            f"{config.DEPLOYMENT['mirror_registry']} --index-filter-by-os='.*'"
        )
        oc_acm_result = exec_cmd(cmd, timeout=7200)

        for line in oc_acm_result.stdout.decode("utf-8").splitlines():
            if "wrote mirroring manifests to" in line:
                break
        else:
            raise NotFoundError(
                "Manifests directory not printed to stdout of 'oc adm catalog mirror ...' command."
            )
        mirroring_manifests_dir = line.replace("wrote mirroring manifests to ",
                                               "")
        logger.debug(
            f"Mirrored manifests directory: {mirroring_manifests_dir}")

        # create ImageContentSourcePolicy
        icsp_file = os.path.join(
            f"{mirroring_manifests_dir}",
            "imageContentSourcePolicy.yaml",
        )
        exec_cmd(f"oc apply -f {icsp_file}")

        # Disable the default OperatorSources
        exec_cmd(
            """oc patch OperatorHub cluster --type json """
            """-p '[{"op": "add", "path": "/spec/disableAllDefaultSources", "value": true}]'"""
        )

        # create redhat-operators CatalogSource
        catalog_source_data = templating.load_yaml(
            constants.CATALOG_SOURCE_YAML)

        catalog_source_manifest = tempfile.NamedTemporaryFile(
            mode="w+", prefix="catalog_source_manifest", delete=False)
        catalog_source_data["spec"]["image"] = f"{mirrored_index_image}"
        catalog_source_data["metadata"]["name"] = "redhat-operators"
        catalog_source_data["spec"][
            "displayName"] = "Red Hat Operators - Mirrored"
        # remove ocs-operator-internal label
        catalog_source_data["metadata"]["labels"].pop("ocs-operator-internal",
                                                      None)

        templating.dump_data_to_temp_yaml(catalog_source_data,
                                          catalog_source_manifest.name)
        exec_cmd(f"oc apply -f {catalog_source_manifest.name}")
        catalog_source = CatalogSource(
            resource_name="redhat-operators",
            namespace=constants.MARKETPLACE_NAMESPACE,
        )
        # Wait for the catalog source to be ready
        catalog_source.wait_for_state("READY")

        return

    if config.DEPLOYMENT.get("stage_rh_osbs"):
        raise NotImplementedError(
            "Disconnected installation from stage is not implemented!")

    if upgrade:
        ocs_registry_image = config.UPGRADE.get("upgrade_ocs_registry_image",
                                                "")
    else:
        ocs_registry_image = config.DEPLOYMENT.get("ocs_registry_image", "")
    logger.debug(f"ocs-registry-image: {ocs_registry_image}")
    ocs_registry_image_and_tag = ocs_registry_image.rsplit(":", 1)
    image_tag = (ocs_registry_image_and_tag[1]
                 if len(ocs_registry_image_and_tag) == 2 else None)
    if not image_tag and config.REPORTING.get("us_ds") == "DS":
        image_tag = get_latest_ds_olm_tag(
            upgrade=False if upgrade else config.UPGRADE.get("upgrade", False),
            latest_tag=config.DEPLOYMENT.get("default_latest_tag", "latest"),
        )
        ocs_registry_image = f"{config.DEPLOYMENT['default_ocs_registry_image'].split(':')[0]}:{image_tag}"
    bundle_image = f"{constants.OCS_OPERATOR_BUNDLE_IMAGE}:{image_tag}"
    logger.debug(f"ocs-operator-bundle image: {bundle_image}")

    csv_yaml = get_csv_from_image(bundle_image)
    ocs_operator_image = (csv_yaml.get("spec", {}).get("install", {}).get(
        "spec",
        {}).get("deployments",
                [{}])[0].get("spec",
                             {}).get("template",
                                     {}).get("spec",
                                             {}).get("containers",
                                                     [{}])[0].get("image"))
    logger.debug(f"ocs-operator-image: {ocs_operator_image}")

    # prepare the list of related images (bundle, registry and operator images
    # and all images from the relatedImages section of the csv)
    ocs_related_images = []
    ocs_related_images.append(get_image_with_digest(bundle_image))
    ocs_registry_image_with_digest = get_image_with_digest(ocs_registry_image)
    ocs_related_images.append(ocs_registry_image_with_digest)
    ocs_related_images.append(get_image_with_digest(ocs_operator_image))
    ocs_related_images += [
        image["image"] for image in csv_yaml.get("spec").get("relatedImages")
    ]
    logger.debug(f"OCS Related Images: {ocs_related_images}")

    mirror_registry = config.DEPLOYMENT["mirror_registry"]
    # prepare images mapping file for mirroring
    mapping_file_content = [
        f"{image}={mirror_registry}{image[image.index('/'):image.index('@')]}\n"
        for image in ocs_related_images
    ]
    logger.debug(f"Mapping file content: {mapping_file_content}")

    name = "ocs-images"
    mapping_file = os.path.join(config.ENV_DATA["cluster_path"],
                                f"{name}-mapping.txt")
    # write mapping file to disk
    with open(mapping_file, "w") as f:
        f.writelines(mapping_file_content)

    # prepare ImageContentSourcePolicy for OCS images
    with open(constants.TEMPLATE_IMAGE_CONTENT_SOURCE_POLICY_YAML) as f:
        ocs_icsp = yaml.safe_load(f)

    ocs_icsp["metadata"]["name"] = name
    ocs_icsp["spec"]["repositoryDigestMirrors"] = []
    for image in ocs_related_images:
        ocs_icsp["spec"]["repositoryDigestMirrors"].append({
            "mirrors":
            [f"{mirror_registry}{image[image.index('/'):image.index('@')]}"],
            "source":
            image[:image.index("@")],
        })
    logger.debug(f"OCS imageContentSourcePolicy: {yaml.safe_dump(ocs_icsp)}")

    ocs_icsp_file = os.path.join(config.ENV_DATA["cluster_path"],
                                 f"{name}-imageContentSourcePolicy.yaml")
    with open(ocs_icsp_file, "w+") as fs:
        yaml.safe_dump(ocs_icsp, fs)

    # create ImageContentSourcePolicy
    exec_cmd(f"oc apply -f {ocs_icsp_file}")

    # mirror images based on mapping file
    with prepare_customized_pull_secret(ocs_related_images) as authfile_fo:
        login_to_mirror_registry(authfile_fo.name)
        exec_cmd(
            f"oc image mirror --filter-by-os='.*' -f {mapping_file} --insecure "
            f"--registry-config={authfile_fo.name} --max-per-registry=2",
            timeout=3600,
        )

        # mirror also OCS registry image with the original version tag (it will
        # be used for creating CatalogSource)
        mirrored_ocs_registry_image = (
            f"{mirror_registry}{ocs_registry_image[ocs_registry_image.index('/'):]}"
        )
        exec_cmd(
            f"podman push --tls-verify=false --authfile {authfile_fo.name} "
            f"{ocs_registry_image} {mirrored_ocs_registry_image}")

    # Disable the default OperatorSources
    exec_cmd(
        """oc patch OperatorHub cluster --type json """
        """-p '[{"op": "add", "path": "/spec/disableAllDefaultSources", "value": true}]'"""
    )

    # wait until the newly created imageContentSourcePolicy is applied on all nodes
    wait_for_machineconfigpool_status("all")

    return mirrored_ocs_registry_image
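
The mapping-file expression slices each digest-pinned image between its first '/' and its '@'. A worked example (the image and registry are hypothetical):

mirror_registry = "mirror-registry.example.com:5000"     # hypothetical
image = "quay.io/rhceph-dev/ocs-registry@sha256:abc123"  # hypothetical digest
# image[image.index('/'):image.index('@')] -> "/rhceph-dev/ocs-registry"
line = f"{image}={mirror_registry}{image[image.index('/'):image.index('@')]}\n"
# -> "quay.io/rhceph-dev/ocs-registry@sha256:abc123="
#    "mirror-registry.example.com:5000/rhceph-dev/ocs-registry\n"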
Example #8
def setup_local_storage(storageclass):
    """
    Setup the necessary resources for enabling local storage.

    Args:
        storageclass (string): storageClassName value to be used in
            LocalVolume CR based on LOCAL_VOLUME_YAML

    """
    # Get the worker nodes
    workers = get_nodes(node_type="worker")
    worker_names = [worker.name for worker in workers]
    logger.debug("Workers: %s", worker_names)

    ocp_version = get_ocp_version()
    ocs_version = config.ENV_DATA.get("ocs_version")
    ocp_ga_version = get_ocp_ga_version(ocp_version)
    if not ocp_ga_version:
        optional_operators_data = templating.load_yaml(
            constants.LOCAL_STORAGE_OPTIONAL_OPERATORS, multi_document=True
        )
        logger.info(
            "Creating temp yaml file with optional operators data:\n %s",
            optional_operators_data,
        )
        optional_operators_yaml = tempfile.NamedTemporaryFile(
            mode="w+", prefix="optional_operators", delete=False
        )
        templating.dump_data_to_temp_yaml(
            optional_operators_data, optional_operators_yaml.name
        )
        with open(optional_operators_yaml.name, "r") as f:
            logger.info(f.read())
        logger.info(
            "Creating optional operators CatalogSource and" " ImageContentSourcePolicy"
        )
        run_cmd(f"oc create -f {optional_operators_yaml.name}")
        logger.info("Sleeping for 60 sec to start update machineconfigpool status")
        # sleep here to start update machineconfigpool status
        time.sleep(60)
        wait_for_machineconfigpool_status("all")

    logger.info("Retrieving local-storage-operator data from yaml")
    lso_data = list(
        templating.load_yaml(constants.LOCAL_STORAGE_OPERATOR, multi_document=True)
    )

    # ensure namespace is correct
    lso_namespace = config.ENV_DATA["local_storage_namespace"]
    for data in lso_data:
        if data["kind"] == "Namespace":
            data["metadata"]["name"] = lso_namespace
        else:
            data["metadata"]["namespace"] = lso_namespace
        if data["kind"] == "OperatorGroup":
            data["spec"]["targetNamespaces"] = [lso_namespace]

    # Update local-storage-operator subscription data with channel
    for data in lso_data:
        if data["kind"] == "Subscription":
            data["spec"]["channel"] = get_lso_channel()
        if not ocp_ga_version:
            if data["kind"] == "Subscription":
                data["spec"]["source"] = "optional-operators"

    # Create temp yaml file and create local storage operator
    logger.info(
        "Creating temp yaml file with local-storage-operator data:\n %s", lso_data
    )
    lso_data_yaml = tempfile.NamedTemporaryFile(
        mode="w+", prefix="local_storage_operator", delete=False
    )
    templating.dump_data_to_temp_yaml(lso_data, lso_data_yaml.name)
    with open(lso_data_yaml.name, "r") as f:
        logger.info(f.read())
    logger.info("Creating local-storage-operator")
    run_cmd(f"oc create -f {lso_data_yaml.name}")

    local_storage_operator = ocp.OCP(kind=constants.POD, namespace=lso_namespace)
    assert local_storage_operator.wait_for_resource(
        condition=constants.STATUS_RUNNING,
        selector=constants.LOCAL_STORAGE_OPERATOR_LABEL,
        timeout=600,
    ), "Local storage operator did not reach running phase"

    # Add RDM disk for vSphere platform
    platform = config.ENV_DATA.get("platform").lower()
    lso_type = config.DEPLOYMENT.get("type")
    if platform == constants.VSPHERE_PLATFORM:
        # Types of LSO Deployment
        # Importing here to avoid circular dependency
        from ocs_ci.deployment.vmware import VSPHEREBASE

        vsphere_base = VSPHEREBASE()

        if lso_type == constants.RDM:
            logger.info(f"LSO Deployment type: {constants.RDM}")
            vsphere_base.add_rdm_disks()

        if lso_type == constants.VMDK:
            logger.info(f"LSO Deployment type: {constants.VMDK}")
            vsphere_base.attach_disk(
                config.ENV_DATA.get("device_size", defaults.DEVICE_SIZE),
                config.DEPLOYMENT.get("provision_type", constants.VM_DISK_TYPE),
            )

        if lso_type == constants.DIRECTPATH:
            raise NotImplementedError(
                "LSO Deployment for VMDirectPath is not implemented"
            )
    if (ocp_version >= "4.6") and (ocs_version >= "4.6"):
        # Pull local volume discovery yaml data
        logger.info("Pulling LocalVolumeDiscovery CR data from yaml")
        lvd_data = templating.load_yaml(constants.LOCAL_VOLUME_DISCOVERY_YAML)
        # Set local-volume-discovery namespace
        lvd_data["metadata"]["namespace"] = lso_namespace

        worker_nodes = get_compute_node_names(no_replace=True)

        # Update local volume discovery data with Worker node Names
        logger.info(
            "Updating LocalVolumeDiscovery CR data with worker nodes Name: %s",
            worker_nodes,
        )
        lvd_data["spec"]["nodeSelector"]["nodeSelectorTerms"][0]["matchExpressions"][0][
            "values"
        ] = worker_nodes
        lvd_data_yaml = tempfile.NamedTemporaryFile(
            mode="w+", prefix="local_volume_discovery", delete=False
        )
        templating.dump_data_to_temp_yaml(lvd_data, lvd_data_yaml.name)

        logger.info("Creating LocalVolumeDiscovery CR")
        run_cmd(f"oc create -f {lvd_data_yaml.name}")

        # Pull local volume set yaml data
        logger.info("Pulling LocalVolumeSet CR data from yaml")
        lvs_data = templating.load_yaml(constants.LOCAL_VOLUME_SET_YAML)

        # Since we don't have a datastore with SSD on our current VMware
        # machines, the LocalVolumeSet doesn't detect a NonRotational disk.
        # As a workaround we add "Rotational" to deviceMechanicalProperties
        # so HDD disks are detected
        if platform == constants.VSPHERE_PLATFORM or config.ENV_DATA.get(
            "local_storage_allow_rotational_disks"
        ):
            logger.info(
                "Adding Rotational for deviceMechanicalProperties spec"
                " to detect HDD disk"
            )
            lvs_data["spec"]["deviceInclusionSpec"][
                "deviceMechanicalProperties"
            ].append("Rotational")

        # Update local volume set data with Worker node Names
        logger.info(
            "Updating LocalVolumeSet CR data with worker nodes Name: %s", worker_nodes
        )
        lvs_data["spec"]["nodeSelector"]["nodeSelectorTerms"][0]["matchExpressions"][0][
            "values"
        ] = worker_nodes

        # Set storage class
        logger.info(
            "Updating LocalVolumeSet CR data with LSO storageclass: %s", storageclass
        )
        lvs_data["spec"]["storageClassName"] = storageclass

        lvs_data_yaml = tempfile.NamedTemporaryFile(
            mode="w+", prefix="local_volume_set", delete=False
        )
        templating.dump_data_to_temp_yaml(lvs_data, lvs_data_yaml.name)
        logger.info("Creating LocalVolumeSet CR")
        run_cmd(f"oc create -f {lvs_data_yaml.name}")
    else:
        # Retrieve NVME device path ID for each worker node
        device_paths = get_device_paths(worker_names)

        # Pull local volume yaml data
        logger.info("Pulling LocalVolume CR data from yaml")
        lv_data = templating.load_yaml(constants.LOCAL_VOLUME_YAML)

        # Set local-volume namespace
        lv_data["metadata"]["namespace"] = lso_namespace

        # Set storage class
        logger.info(
            "Updating LocalVolume CR data with LSO storageclass: %s", storageclass
        )
        for scd in lv_data["spec"]["storageClassDevices"]:
            scd["storageClassName"] = storageclass

        # Update local volume data with NVME IDs
        logger.info("Updating LocalVolume CR data with device paths: %s", device_paths)
        lv_data["spec"]["storageClassDevices"][0]["devicePaths"] = device_paths

        # Create temp yaml file and create local volume
        lv_data_yaml = tempfile.NamedTemporaryFile(
            mode="w+", prefix="local_volume", delete=False
        )
        templating.dump_data_to_temp_yaml(lv_data, lv_data_yaml.name)
        logger.info("Creating LocalVolume CR")
        run_cmd(f"oc create -f {lv_data_yaml.name}")
    logger.info("Waiting 30 seconds for PVs to create")
    storage_class_device_count = 1
    if platform == constants.AWS_PLATFORM:
        storage_class_device_count = 2
    verify_pvs_created(len(worker_names) * storage_class_device_count)
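
The version guard above compares semantic versions rather than plain strings, because lexicographic comparison misorders multi-digit minor versions. A short demonstration of the pitfall (assuming the semantic_version package):

from semantic_version import Version

# Lexicographic comparison gets multi-digit minor versions wrong:
assert not ("4.10" >= "4.6")  # '1' < '6' as characters
# Semantic comparison orders them correctly:
assert Version.coerce("4.10") >= Version.coerce("4.6")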
Example #9
    def flexy_post_processing(self):
        """
        Perform a few actions required after flexy execution:
        - update global pull-secret
        - login to mirror registry (disconnected cluster)
        - configure proxy server (disconnected cluster)
        - configure ntp (if required)
        """
        kubeconfig = os.path.join(self.cluster_path,
                                  config.RUN.get("kubeconfig_location"))

        # Update kubeconfig with proxy-url (if client_http_proxy
        # configured) to redirect client access through proxy server.
        # Since flexy-dir is already copied to cluster-dir, we will update
        # the kubeconfig in both places.
        flexy_kubeconfig = os.path.join(
            self.flexy_host_dir,
            constants.FLEXY_RELATIVE_CLUSTER_DIR,
            "auth/kubeconfig",
        )
        update_kubeconfig_with_proxy_url_for_client(kubeconfig)
        update_kubeconfig_with_proxy_url_for_client(flexy_kubeconfig)

        # load cluster info
        load_cluster_info()

        # Download terraform binary based on version used by Flexy and
        # update the installer path in ENV_DATA
        terraform_data_dir = os.path.join(self.cluster_path,
                                          constants.TERRAFORM_DATA_DIR)
        terraform_tfstate = os.path.join(terraform_data_dir,
                                         "terraform.tfstate")
        with open(terraform_tfstate, "r") as fd:
            ttc = hcl.load(fd)
            terraform_version = ttc.get("terraform_version",
                                        config.DEPLOYMENT["terraform_version"])
        terraform_installer = get_terraform(version=terraform_version)
        config.ENV_DATA["terraform_installer"] = terraform_installer

        # Download terraform ignition provider
        # ignition provider dependency from OCP 4.6
        ocp_version = get_ocp_version()
        if Version.coerce(ocp_version) >= Version.coerce("4.6"):
            get_terraform_ignition_provider(terraform_data_dir)

        # if on disconnected cluster, perform required tasks
        pull_secret_path = os.path.join(constants.DATA_DIR, "pull-secret")
        if config.DEPLOYMENT.get("disconnected"):
            # login to mirror registry
            login_to_mirror_registry(pull_secret_path)
            # configure additional allowed domains in proxy
            configure_allowed_domains_in_proxy()

        # update pull-secret
        secret_cmd = (f"oc set data secret/pull-secret "
                      f"--kubeconfig {kubeconfig} "
                      f"-n {constants.OPENSHIFT_CONFIG_NAMESPACE} "
                      f"--from-file=.dockerconfigjson={pull_secret_path}")
        exec_cmd(secret_cmd)

        if not config.ENV_DATA.get("skip_ntp_configuration", False):
            ntp_cmd = (f"oc --kubeconfig {kubeconfig} "
                       f"create -f {constants.NTP_CHRONY_CONF}")
            logger.info("Creating NTP chrony")
            exec_cmd(ntp_cmd)
        # sleep here to let the machineconfigpool status update start
        time.sleep(60)
        wait_for_machineconfigpool_status("all")
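
terraform.tfstate is plain JSON, so the terraform_version lookup above could equally use the standard library; a minimal sketch (the path and fallback value are hypothetical):

import json

with open("terraform_data/terraform.tfstate") as fd:  # hypothetical path
    tfstate = json.load(fd)
# tfstate carries a top-level "terraform_version" key
terraform_version = tfstate.get("terraform_version", "0.11.14")  # illustrative fallback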
Example #10
def create_catalog_source(image=None, ignore_upgrade=False):
    """
    This prepares the catalog source manifest to deploy the OCS operator
    from the quay registry.

    Args:
        image (str): Image of ocs registry.
        ignore_upgrade (bool): Ignore upgrade parameter.

    """
    if config.ENV_DATA["platform"] == constants.IBMCLOUD_PLATFORM:
        link_all_sa_and_secret(constants.OCS_SECRET,
                               constants.MARKETPLACE_NAMESPACE)
    logger.info("Adding CatalogSource")
    if not image:
        image = config.DEPLOYMENT.get("ocs_registry_image", "")
    if config.DEPLOYMENT.get("stage_rh_osbs"):
        image = config.DEPLOYMENT.get("stage_index_image",
                                      constants.OSBS_BOUNDLE_IMAGE)
        osbs_image_tag = config.DEPLOYMENT.get("stage_index_image_tag",
                                               f"v{get_ocp_version()}")
        image += f":{osbs_image_tag}"
        run_cmd("oc patch image.config.openshift.io/cluster --type merge -p '"
                '{"spec": {"registrySources": {"insecureRegistries": '
                '["registry-proxy.engineering.redhat.com"]}}}\'')
        run_cmd(
            f"oc apply -f {constants.STAGE_IMAGE_CONTENT_SOURCE_POLICY_YAML}")
        logger.info(
            "Sleeping for 60 sec to let the machineconfigpool status update start")
        time.sleep(60)
        wait_for_machineconfigpool_status("all", timeout=1800)
    if not ignore_upgrade:
        upgrade = config.UPGRADE.get("upgrade", False)
    else:
        upgrade = False
    image_and_tag = image.split(":")
    image = image_and_tag[0]
    image_tag = image_and_tag[1] if len(image_and_tag) == 2 else None
    if not image_tag and config.REPORTING.get("us_ds") == "DS":
        image_tag = get_latest_ds_olm_tag(upgrade,
                                          latest_tag=config.DEPLOYMENT.get(
                                              "default_latest_tag", "latest"))

    platform = config.ENV_DATA.get("platform").lower()
    if platform == constants.IBM_POWER_PLATFORM:
        # TEMP Hack... latest-stable-4.6 does not have ppc64le bits.
        image_tag = "latest-4.6"
    catalog_source_data = templating.load_yaml(constants.CATALOG_SOURCE_YAML)
    cs_name = constants.OPERATOR_CATALOG_SOURCE_NAME
    change_cs_condition = ((image or image_tag)
                           and catalog_source_data["kind"] == "CatalogSource"
                           and catalog_source_data["metadata"]["name"]
                           == cs_name)
    if change_cs_condition:
        default_image = config.DEPLOYMENT["default_ocs_registry_image"]
        image = image if image else default_image.rsplit(":", 1)[0]
        catalog_source_data["spec"][
            "image"] = f"{image}:{image_tag if image_tag else 'latest'}"
    catalog_source_manifest = tempfile.NamedTemporaryFile(
        mode="w+", prefix="catalog_source_manifest", delete=False)
    templating.dump_data_to_temp_yaml(catalog_source_data,
                                      catalog_source_manifest.name)
    run_cmd(f"oc apply -f {catalog_source_manifest.name}", timeout=2400)
    catalog_source = CatalogSource(
        resource_name=constants.OPERATOR_CATALOG_SOURCE_NAME,
        namespace=constants.MARKETPLACE_NAMESPACE,
    )
    # Wait for the catalog source to be ready
    catalog_source.wait_for_state("READY")
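
The rsplit(":", 1) parsing above splits only on the final colon, which matters when the registry host carries a port. A worked example (the image is hypothetical):

image = "registry.example.com:5000/ocs/ocs-registry:latest"  # hypothetical
# a naive split(":") returns three parts and misparses the tag:
assert image.split(":") == [
    "registry.example.com", "5000/ocs/ocs-registry", "latest"
]
# rsplit(":", 1) splits only on the last colon:
name, tag = image.rsplit(":", 1)
assert (name, tag) == ("registry.example.com:5000/ocs/ocs-registry", "latest")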
Example #11
def prune_and_mirror_index_image(
    index_image, mirrored_index_image, packages, icsp=None
):
    """
    Prune given index image and push it to mirror registry, mirror all related
    images to mirror registry and create relevant imageContentSourcePolicy

    Args:
        index_image (str): index image which will be pruned and mirrored
        mirrored_index_image (str): mirrored index image which will be pushed to
            mirror registry
        packages (list): list of packages to keep
        icsp (dict): ImageContentSourcePolicy used for mirroring (workaround for
            stage images, which are pointing to different registry than they
            really are)

    Returns:
        str: path to generated catalogSource.yaml file

    """
    get_opm_tool()
    pull_secret_path = os.path.join(constants.TOP_DIR, "data", "pull-secret")

    # prune an index image
    logger.info(
        f"Prune index image {index_image} -> {mirrored_index_image} "
        f"(packages: {', '.join(packages)})"
    )
    cmd = (
        f"opm index prune -f {index_image} "
        f"-p {','.join(packages)} "
        f"-t {mirrored_index_image}"
    )
    if config.DEPLOYMENT.get("opm_index_prune_binary_image"):
        cmd += (
            f" --binary-image {config.DEPLOYMENT.get('opm_index_prune_binary_image')}"
        )
    # opm tool doesn't have --authfile parameter, we have to supply auth
    # file through env variable
    os.environ["REGISTRY_AUTH_FILE"] = pull_secret_path
    exec_cmd(cmd)

    # login to mirror registry
    login_to_mirror_registry(pull_secret_path)

    # push pruned index image to mirror registry
    logger.info(f"Push pruned index image to mirror registry: {mirrored_index_image}")
    cmd = f"podman push --authfile {pull_secret_path} --tls-verify=false {mirrored_index_image}"
    exec_cmd(cmd)

    # mirror related images (this might take a very long time)
    logger.info(f"Mirror images related to index image: {mirrored_index_image}")
    cmd = (
        f"oc adm catalog mirror {mirrored_index_image} -a {pull_secret_path} --insecure "
        f"{config.DEPLOYMENT['mirror_registry']} --index-filter-by-os='.*' --max-per-registry=2"
    )
    oc_acm_result = exec_cmd(cmd, timeout=7200)

    for line in oc_acm_result.stdout.decode("utf-8").splitlines():
        if "wrote mirroring manifests to" in line:
            break
    else:
        raise NotFoundError(
            "Manifests directory not printed to stdout of 'oc adm catalog mirror ...' command."
        )
    mirroring_manifests_dir = line.replace("wrote mirroring manifests to ", "")
    logger.debug(f"Mirrored manifests directory: {mirroring_manifests_dir}")

    if icsp:
        # update mapping.txt file with urls updated based on provided
        # imageContentSourcePolicy
        mapping_file = os.path.join(
            f"{mirroring_manifests_dir}",
            "mapping.txt",
        )
        with open(mapping_file) as mf:
            mapping_file_content = []
            for line in mf:
                # exclude mirrored_index_image
                if mirrored_index_image in line:
                    continue
                # apply any matching policy to all lines from mapping file
                for policy in icsp["spec"]["repositoryDigestMirrors"]:
                    # we use only the first defined mirror for a particular
                    # source, because we don't use any ICSP with more mirrors
                    # for one source, and supporting that would make the logic
                    # very complex and confusing
                    line = line.replace(policy["source"], policy["mirrors"][0])
                mapping_file_content.append(line)
        # write mapping file to disk
        mapping_file_updated = os.path.join(
            f"{mirroring_manifests_dir}",
            "mapping_updated.txt",
        )
        with open(mapping_file_updated, "w") as f:
            f.writelines(mapping_file_content)
        # mirror images based on the updated mapping file
        # ignore errors, because some of the images might be already mirrored
        # via the `oc adm catalog mirror ...` command and not available on the
        # mirror
        exec_cmd(
            f"oc image mirror --filter-by-os='.*' -f {mapping_file_updated} "
            f"--insecure --registry-config={pull_secret_path} "
            "--max-per-registry=2 --continue-on-error=true --skip-missing=true",
            timeout=3600,
            ignore_error=True,
        )

    # create ImageContentSourcePolicy
    icsp_file = os.path.join(
        f"{mirroring_manifests_dir}",
        "imageContentSourcePolicy.yaml",
    )
    exec_cmd(f"oc apply -f {icsp_file}")
    logger.info("Sleeping for 60 sec to start update machineconfigpool status")
    time.sleep(60)
    wait_for_machineconfigpool_status("all")

    cs_file = os.path.join(
        f"{mirroring_manifests_dir}",
        "catalogSource.yaml",
    )
    return cs_file
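
A hedged usage sketch of this function (the argument values are hypothetical; the callers above pass constants.DISCON_CL_REQUIRED_PACKAGES as the packages list):

cs_file = prune_and_mirror_index_image(
    index_image="registry.redhat.io/redhat/redhat-operator-index:v4.9",
    mirrored_index_image=(
        "mirror-registry.example.com:5000/olm/redhat-operator-index:v4.9"
    ),
    packages=["ocs-operator", "local-storage-operator"],
    icsp=None,  # no ICSP-based mapping rewrite needed here
)
# cs_file points at the generated catalogSource.yaml
exec_cmd(f"oc apply -f {cs_file}")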
Example #12
def configure_custom_ingress_cert():
    """
    Configure a custom SSL certificate for ingress. If the certificate doesn't
    exist, generate a new one signed by the automatic certificate signing
    service.

    Raises:
        ConfigurationError: when some required parameter is not configured

    """
    logger.info("Configure custom ingress certificate")
    base_domain = config.ENV_DATA["base_domain"]
    cluster_name = config.ENV_DATA["cluster_name"]
    apps_domain = f"*.apps.{cluster_name}.{base_domain}"

    ssl_key = config.DEPLOYMENT.get("ingress_ssl_key")
    ssl_cert = config.DEPLOYMENT.get("ingress_ssl_cert")

    signing_service_url = config.DEPLOYMENT.get("cert_signing_service_url")

    if not (os.path.exists(ssl_key) and os.path.exists(ssl_cert)):
        if not signing_service_url:
            msg = (
                "Custom certificate files for ingress doesn't exists and "
                "`DEPLOYMENT['cert_signing_service_url']` is not defined. "
                "Unable to generate custom Ingress certificate!"
            )
            logger.error(msg)
            raise exceptions.ConfigurationError(msg)

        logger.debug(
            f"Files '{ssl_key}' and '{ssl_cert}' doesn't exist, generate certificate"
        )
        cert = OCSCertificate(
            signing_service=signing_service_url,
            cn=apps_domain,
            sans=[f"DNS:{apps_domain}"],
        )
        logger.debug(f"Certificate key: {cert.key}")
        logger.debug(f"Certificate: {cert.crt}")
        cert.save_key(ssl_key)
        cert.save_crt(ssl_cert)
        logger.info(f"Certificate saved to '{ssl_cert}' and key to '{ssl_key}'")

    ssl_ca_cert = get_root_ca_cert()
    if ssl_ca_cert:
        logger.debug(f"Configure '{ssl_ca_cert}' for proxy configuration object")
        cmd = (
            "oc create configmap ocs-ca -n openshift-config "
            f"--from-file=ca-bundle.crt={ssl_ca_cert}"
        )
        exec_cmd(cmd)
        cmd = (
            "oc patch proxy/cluster --type=merge "
            '--patch=\'{"spec":{"trustedCA":{"name":"ocs-ca"}}}\''
        )
        exec_cmd(cmd)

    logger.debug(f"Configuring '{ssl_key}' and '{ssl_cert}' for ingress")
    cmd = (
        "oc create secret tls ocs-cert -n openshift-ingress "
        f"--cert={ssl_cert} --key={ssl_key}"
    )
    exec_cmd(cmd)

    cmd = (
        "oc patch ingresscontroller.operator default -n openshift-ingress-operator "
        '--type=merge -p \'{"spec":{"defaultCertificate": {"name": "ocs-cert"}}}\''
    )
    exec_cmd(cmd)
    wait_for_machineconfigpool_status("all", timeout=1800)
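
Once the ingresscontroller is patched, the served certificate can be spot-checked against the apps domain; a minimal standalone sketch (the domain is hypothetical):

import socket
import ssl

def fetch_served_cert(host, port=443):
    """Return the PEM certificate currently served for the given host."""
    ctx = ssl.create_default_context()
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE  # we only want to inspect the cert
    with socket.create_connection((host, port)) as sock:
        with ctx.wrap_socket(sock, server_hostname=host) as tls:
            der = tls.getpeercert(binary_form=True)
    return ssl.DER_cert_to_PEM_cert(der)

# pem = fetch_served_cert("console-openshift-console.apps.mycluster.example.com")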
Example #13
def create_catalog_source(image=None, ignore_upgrade=False):
    """
    This prepares the catalog source manifest to deploy the OCS operator
    from the quay registry.

    Args:
        image (str): Image of ocs registry.
        ignore_upgrade (bool): Ignore upgrade parameter.

    """
    # Because the custom catalog source will be called redhat-operators, we
    # need to disable the default source of that name. This should not be an
    # issue as OCS internal registry images are now based on the OCP registry
    # image
    disable_specific_source(constants.OPERATOR_CATALOG_SOURCE_NAME)
    logger.info("Adding CatalogSource")
    if not image:
        image = config.DEPLOYMENT.get("ocs_registry_image", "")
    if config.DEPLOYMENT.get("stage_rh_osbs"):
        image = config.DEPLOYMENT.get("stage_index_image", constants.OSBS_BOUNDLE_IMAGE)
        ocp_version = version.get_semantic_ocp_version_from_config()
        osbs_image_tag = config.DEPLOYMENT.get(
            "stage_index_image_tag", f"v{ocp_version}"
        )
        image += f":{osbs_image_tag}"
        run_cmd(
            "oc patch image.config.openshift.io/cluster --type merge -p '"
            '{"spec": {"registrySources": {"insecureRegistries": '
            '["registry-proxy.engineering.redhat.com", "registry.stage.redhat.io"]'
            "}}}'"
        )
        run_cmd(f"oc apply -f {constants.STAGE_IMAGE_CONTENT_SOURCE_POLICY_YAML}")
        logger.info("Sleeping for 60 sec to start update machineconfigpool status")
        time.sleep(60)
        wait_for_machineconfigpool_status("all", timeout=1800)
    if not ignore_upgrade:
        upgrade = config.UPGRADE.get("upgrade", False)
    else:
        upgrade = False
    image_and_tag = image.rsplit(":", 1)
    image = image_and_tag[0]
    image_tag = image_and_tag[1] if len(image_and_tag) == 2 else None
    if not image_tag and config.REPORTING.get("us_ds") == "DS":
        image_tag = get_latest_ds_olm_tag(
            upgrade, latest_tag=config.DEPLOYMENT.get("default_latest_tag", "latest")
        )

    catalog_source_data = templating.load_yaml(constants.CATALOG_SOURCE_YAML)
    if config.ENV_DATA["platform"] == constants.IBMCLOUD_PLATFORM:
        create_ocs_secret(constants.MARKETPLACE_NAMESPACE)
        catalog_source_data["spec"]["secrets"] = [constants.OCS_SECRET]
    cs_name = constants.OPERATOR_CATALOG_SOURCE_NAME
    change_cs_condition = (
        (image or image_tag)
        and catalog_source_data["kind"] == "CatalogSource"
        and catalog_source_data["metadata"]["name"] == cs_name
    )
    if change_cs_condition:
        default_image = config.DEPLOYMENT["default_ocs_registry_image"]
        image = image if image else default_image.rsplit(":", 1)[0]
        catalog_source_data["spec"][
            "image"
        ] = f"{image}:{image_tag if image_tag else 'latest'}"
    catalog_source_manifest = tempfile.NamedTemporaryFile(
        mode="w+", prefix="catalog_source_manifest", delete=False
    )
    templating.dump_data_to_temp_yaml(catalog_source_data, catalog_source_manifest.name)
    run_cmd(f"oc apply -f {catalog_source_manifest.name}", timeout=2400)
    catalog_source = CatalogSource(
        resource_name=constants.OPERATOR_CATALOG_SOURCE_NAME,
        namespace=constants.MARKETPLACE_NAMESPACE,
    )
    # Wait for the catalog source to be ready
    catalog_source.wait_for_state("READY")