Example #1
    def flexy_post_processing(self):
        """
        Perform a few actions required after flexy execution:
        - update global pull-secret
        - login to mirror registry (disconnected cluster)
        - configure proxy server (disconnected cluster)
        - configure ntp (if required)
        """
        # kubeconfig path, used by the oc commands below
        kubeconfig = os.path.join(self.cluster_path,
                                  config.RUN.get("kubeconfig_location"))
        # load cluster info
        load_cluster_info()

        # if on disconnected cluster, perform required tasks
        pull_secret_path = os.path.join(constants.DATA_DIR, "pull-secret")
        if config.DEPLOYMENT.get("disconnected"):
            # login to mirror registry
            login_to_mirror_registry(pull_secret_path)
            # configure additional allowed domains in proxy
            configure_allowed_domains_in_proxy()

        # update pull-secret
        secret_cmd = (f"oc set data secret/pull-secret "
                      f"--kubeconfig {kubeconfig} "
                      f"-n {constants.OPENSHIFT_CONFIG_NAMESPACE} "
                      f"--from-file=.dockerconfigjson={pull_secret_path}")
        exec_cmd(secret_cmd)

        if not config.ENV_DATA.get("skip_ntp_configuration", False):
            ntp_cmd = (f"oc --kubeconfig {kubeconfig} "
                       f"create -f {constants.NTP_CHRONY_CONF}")
            logger.info("Creating NTP chrony")
            exec_cmd(ntp_cmd)
        # sleep to let the machineconfigpool status update begin before polling
        time.sleep(60)
        wait_for_machineconfigpool_status("all")
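
For context, the final call above blocks until every MachineConfigPool has rolled out the updated configuration. A minimal sketch of such a polling loop, assuming oc is on the PATH; the helper name is illustrative and not the ocs-ci implementation:

    import json
    import subprocess
    import time

    def wait_for_all_mcp_updated(timeout=1800, interval=30):
        """Poll MachineConfigPools until all report Updated=True (sketch)."""
        deadline = time.time() + timeout
        while time.time() < deadline:
            out = subprocess.run(
                ["oc", "get", "machineconfigpool", "-o", "json"],
                capture_output=True, text=True, check=True,
            ).stdout
            pools = json.loads(out)["items"]
            # a pool is done when its Updated condition is True
            if pools and all(
                any(c["type"] == "Updated" and c["status"] == "True"
                    for c in pool["status"].get("conditions", []))
                for pool in pools
            ):
                return
            time.sleep(interval)
        raise TimeoutError("MachineConfigPools did not reach Updated state")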
Example #2
    def flexy_post_processing(self):
        """
        Perform a few actions required after flexy execution:
        - update global pull-secret
        - login to mirror registry (disconnected cluster)
        - configure proxy server (disconnected cluster)
        - configure ntp (if required)
        """
        kubeconfig = os.path.join(self.cluster_path,
                                  config.RUN.get("kubeconfig_location"))

        # Update kubeconfig with proxy-url (if client_http_proxy
        # configured) to redirect client access through proxy server.
        # Since the flexy dir is already copied to the cluster dir, we update
        # the kubeconfig in both places.
        flexy_kubeconfig = os.path.join(
            self.flexy_host_dir,
            constants.FLEXY_RELATIVE_CLUSTER_DIR,
            "auth/kubeconfig",
        )
        update_kubeconfig_with_proxy_url_for_client(kubeconfig)
        update_kubeconfig_with_proxy_url_for_client(flexy_kubeconfig)

        # load cluster info
        load_cluster_info()

        # Download terraform binary based on version used by Flexy and
        # update the installer path in ENV_DATA
        terraform_data_dir = os.path.join(self.cluster_path,
                                          constants.TERRAFORM_DATA_DIR)
        terraform_tfstate = os.path.join(terraform_data_dir,
                                         "terraform.tfstate")
        with open(terraform_tfstate, "r") as fd:
            ttc = hcl.load(fd)
            terraform_version = ttc.get("terraform_version",
                                        config.DEPLOYMENT["terraform_version"])
        terraform_installer = get_terraform(version=terraform_version)
        config.ENV_DATA["terraform_installer"] = terraform_installer

        # Download terraform ignition provider
        # the ignition provider is a dependency starting from OCP 4.6
        ocp_version = get_ocp_version()
        if Version.coerce(ocp_version) >= Version.coerce("4.6"):
            get_terraform_ignition_provider(terraform_data_dir)

        # if on disconnected cluster, perform required tasks
        pull_secret_path = os.path.join(constants.DATA_DIR, "pull-secret")
        if config.DEPLOYMENT.get("disconnected"):
            # login to mirror registry
            login_to_mirror_registry(pull_secret_path)
            # configure additional allowed domains in proxy
            configure_allowed_domains_in_proxy()

        # update pull-secret
        secret_cmd = (f"oc set data secret/pull-secret "
                      f"--kubeconfig {kubeconfig} "
                      f"-n {constants.OPENSHIFT_CONFIG_NAMESPACE} "
                      f"--from-file=.dockerconfigjson={pull_secret_path}")
        exec_cmd(secret_cmd)

        if not config.ENV_DATA.get("skip_ntp_configuration", False):
            ntp_cmd = (f"oc --kubeconfig {kubeconfig} "
                       f"create -f {constants.NTP_CHRONY_CONF}")
            logger.info("Creating NTP chrony")
            exec_cmd(ntp_cmd)
        # sleep to let the machineconfigpool status update begin before polling
        time.sleep(60)
        wait_for_machineconfigpool_status("all")
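
The terraform.tfstate parsed above is JSON, so the version lookup can be reproduced with the standard library alone. A minimal standalone sketch; the function name and fallback value are illustrative, not part of ocs-ci:

    import json
    import os

    def read_terraform_version(terraform_data_dir, fallback="0.11.14"):
        """Read terraform_version from terraform.tfstate, with a fallback (sketch)."""
        tfstate = os.path.join(terraform_data_dir, "terraform.tfstate")
        with open(tfstate, "r") as fd:
            state = json.load(fd)
        # terraform writes this key on every state update; the fallback here
        # is an arbitrary example value
        return state.get("terraform_version", fallback)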
Example #3
    def deploy_ocs(self):
        """
        Handle OCS deployment. Since the OCS deployment steps are common to
        all platforms, the deployment is implemented here in the base class.
        """
        # make sure the image registry is in Managed state, so that its
        # backend can later be switched to OCS
        set_registry_to_managed_state()
        image = None
        ceph_cluster = ocp.OCP(kind="CephCluster", namespace=self.namespace)
        try:
            ceph_cluster.get().get("items")[0]
            logger.warning("OCS cluster already exists")
            return
        except (IndexError, CommandFailed):
            logger.info("Running OCS basic installation")

        # handle disconnected installation, if configured
        load_cluster_info()
        if config.DEPLOYMENT.get("disconnected"):
            image = prepare_disconnected_ocs_deployment()

        if config.DEPLOYMENT["external_mode"]:
            self.deploy_with_external_mode()
        else:
            self.deploy_ocs_via_operator(image)
            pod = ocp.OCP(kind=constants.POD, namespace=self.namespace)
            cfs = ocp.OCP(kind=constants.CEPHFILESYSTEM,
                          namespace=self.namespace)
            # Check for Ceph pods
            mon_pod_timeout = (
                900 if self.platform == constants.IBMCLOUD_PLATFORM else 600
            )
            assert pod.wait_for_resource(
                condition="Running",
                selector="app=rook-ceph-mon",
                resource_count=3,
                timeout=mon_pod_timeout,
            )
            assert pod.wait_for_resource(condition="Running",
                                         selector="app=rook-ceph-mgr",
                                         timeout=600)
            assert pod.wait_for_resource(
                condition="Running",
                selector="app=rook-ceph-osd",
                resource_count=3,
                timeout=600,
            )

            # validate ceph mon/osd volumes are backed by pvc
            validate_cluster_on_pvc()

            # validate PDB creation of MON, MDS, OSD pods
            validate_pdb_creation()

            # Creating toolbox pod
            setup_ceph_toolbox()

            assert pod.wait_for_resource(
                condition=constants.STATUS_RUNNING,
                selector="app=rook-ceph-tools",
                resource_count=1,
                timeout=600,
            )

            if not config.COMPONENTS["disable_cephfs"]:
                # Check for CephFilesystem creation in ocp
                cfs_data = cfs.get()
                cfs_name = cfs_data["items"][0]["metadata"]["name"]

                if helpers.validate_cephfilesystem(cfs_name):
                    logger.info("MDS deployment is successful!")
                    defaults.CEPHFILESYSTEM_NAME = cfs_name
                else:
                    logger.error("MDS deployment Failed! Please check logs!")

        # Change monitoring backend to OCS
        if config.ENV_DATA.get("monitoring_enabled") and config.ENV_DATA.get(
                "persistent-monitoring"):
            setup_persistent_monitoring()
        elif config.ENV_DATA.get("monitoring_enabled") and config.ENV_DATA.get(
                "telemeter_server_url"):
            # Create configmap cluster-monitoring-config to reconfigure
            # telemeter server url when 'persistent-monitoring' is False
            create_configmap_cluster_monitoring_pod(
                telemeter_server_url=config.ENV_DATA["telemeter_server_url"])

        if not config.COMPONENTS["disable_cephfs"]:
            # Change registry backend to OCS CEPHFS RWX PVC
            registry.change_registry_backend_to_ocs()

        # Verify health of ceph cluster
        logger.info("Done creating rook resources, waiting for HEALTH_OK")
        try:
            ceph_health_check(namespace=self.namespace, tries=30, delay=10)
        except CephHealthException as ex:
            err = str(ex)
            logger.warning(f"Ceph health check failed with {err}")
            if "clock skew detected" in err:
                logger.info(f"Changing NTP on compute nodes to"
                            f" {constants.RH_NTP_CLOCK}")
                if self.platform == constants.VSPHERE_PLATFORM:
                    update_ntp_compute_nodes()
                assert ceph_health_check(namespace=self.namespace,
                                         tries=60,
                                         delay=10)

        # patch gp2/thin storage class as 'non-default'
        self.patch_default_sc_to_non_default()
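
The ceph_health_check calls above retry until the cluster reports HEALTH_OK. The same idea can be sketched as a plain polling loop that queries health through the toolbox pod, assuming a rook-ceph-tools deployment exists in the namespace and oc is on the PATH; the helper below is illustrative, not the ocs-ci API:

    import subprocess
    import time

    def wait_for_ceph_health_ok(namespace, tries=30, delay=10):
        """Poll ceph health via the toolbox pod until HEALTH_OK (sketch)."""
        health = ""
        for _ in range(tries):
            result = subprocess.run(
                ["oc", "-n", namespace, "exec", "deploy/rook-ceph-tools",
                 "--", "ceph", "health"],
                capture_output=True, text=True,
            )
            health = result.stdout.strip()
            if health == "HEALTH_OK":
                return
            time.sleep(delay)
        raise RuntimeError(f"Ceph health is still {health!r} after {tries} tries")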