Code example #1
    def setup(self, add_nodes):
        """
        Check that we have the right configurations before we start the test
        """
        osd_pods_before = pod_helpers.get_osd_pods()
        number_of_osd_pods_before = len(osd_pods_before)
        if number_of_osd_pods_before >= constants.MAX_OSDS:
            pytest.skip("The cluster already has the maximum number of OSDs")

        # If we use vSphere we may need to add more worker nodes
        # to the cluster before starting the test
        if (config.ENV_DATA["platform"].lower() == constants.VSPHERE_PLATFORM
                and number_of_osd_pods_before >= 9):
            num_of_expected_wnodes = 6
            wnodes = node.get_worker_nodes()
            num_of_wnodes = len(wnodes)
            logging.info(
                f"We have {number_of_osd_pods_before} OSDs in the cluster, "
                f"and {num_of_wnodes} worker nodes in the cluster")
            if num_of_wnodes < num_of_expected_wnodes:
                num_of_wnodes_to_add = num_of_expected_wnodes - num_of_wnodes
                logging.info(
                    f"Adding more {num_of_wnodes_to_add} worker nodes to the cluster"
                )
                add_nodes(ocs_nodes=False, node_count=num_of_wnodes_to_add)

            wnodes_not_in_ocs = node.get_worker_nodes_not_in_ocs()
            if wnodes_not_in_ocs:
                logging.info("Label the worker nodes that are not in OCS")
                node.label_nodes(wnodes_not_in_ocs)
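
A minimal wiring sketch follows, showing how a setup fixture like this is typically consumed from a pytest test class. The class and test names below are invented for illustration, the ocs-ci imports (pod_helpers, constants, node) are omitted just as in the excerpt above, and only add_nodes, pod_helpers.get_osd_pods, and constants.MAX_OSDS come from the example itself.

import pytest


class TestAddCapacityWithNewWorkers:
    @pytest.fixture(autouse=True)
    def setup(self, add_nodes):
        # Same guard as in code example #1: skip when the cluster already
        # runs the maximum number of OSDs, otherwise (on vSphere) make sure
        # enough labeled worker nodes exist before the test body starts.
        if len(pod_helpers.get_osd_pods()) >= constants.MAX_OSDS:
            pytest.skip("The cluster already has the maximum number of OSDs")
        # ...vSphere worker-node scaling as shown above...

    def test_add_capacity(self):
        # Runs only when setup() did not skip; any newly added worker
        # nodes are already labeled for OCS at this point.
        pass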
Code example #2
File: acm.py  Project: ramkiperiy/ocs-ci
    def submariner_configure_upstream(self):
        """
        Deploy and Configure upstream submariner

        Raises:
            DRPrimaryNotFoundException: If there is no designated primary cluster found

        """
        if self.designated_broker_cluster_index < 0:
            raise DRPrimaryNotFoundException(
                "Designated primary cluster not found")

        # Deploy broker on designated cluster
        # Follow the config context switches below carefully to stay aware
        # of the context in which each operation is performed
        config.switch_ctx(self.designated_broker_cluster_index)
        logger.info(
            f"Switched context: {config.cluster_ctx.ENV_DATA['cluster_name']}")

        deploy_broker_cmd = "deploy-broker"
        try:
            run_subctl_cmd(deploy_broker_cmd)
        except CommandFailed:
            logger.exception("Failed to deploy submariner broker")
            raise

        # Label the gateway nodes on all non-ACM clusters
        restore_index = config.cur_index
        for cluster in get_non_acm_cluster_config():
            config.switch_ctx(cluster.MULTICLUSTER["multicluster_index"])
            gateway_node = self.get_default_gateway_node()
            label_nodes([gateway_node],
                        constants.SUBMARINER_GATEWAY_NODE_LABEL)
        config.switch_ctx(restore_index)

        # Join all the clusters (except ACM cluster in case of hub deployment)
        for cluster in config.clusters:
            cluster_index = cluster.MULTICLUSTER["multicluster_index"]
            if cluster_index != config.get_acm_index():
                join_cmd = (f"join --kubeconfig {cluster.RUN['kubeconfig']} "
                            f"{config.ENV_DATA['submariner_info_file']} "
                            f"--clusterid c{self.cluster_seq} --natt=false")
                try:
                    run_subctl_cmd(join_cmd)
                    logger.info(
                        f"Subctl join succeeded for {cluster.ENV_DATA['cluster_name']}"
                    )
                except CommandFailed:
                    logger.exception("Cluster failed to join")
                    raise

                self.cluster_seq += 1
                self.dr_only_list.append(cluster_index)
        # Verify submariner connectivity between clusters (excluding ACM)
        kubeconf_list = []
        for i in self.dr_only_list:
            kubeconf_list.append(config.clusters[i].RUN["kubeconfig"])
        connect_check = f"verify {' '.join(kubeconf_list)} --only connectivity"
        run_subctl_cmd(connect_check)
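
Both the broker deployment and the join/verify steps above go through run_subctl_cmd to drive the subctl binary. That helper is part of ocs-ci and is not shown on this page; the sketch below is only a rough approximation of what such a wrapper could look like (the function name, binary location, and timeout are assumptions), not the project's actual implementation.

import os
import subprocess


def run_subctl_cmd_sketch(cmd, timeout=600):
    """Prefix `cmd` with the subctl binary, run it, and raise on failure."""
    subctl_bin = os.path.expanduser("~/.local/bin/subctl")  # assumed location
    return subprocess.run(
        f"{subctl_bin} {cmd}".split(), check=True, timeout=timeout
    )


# e.g. run_subctl_cmd_sketch("deploy-broker") runs "subctl deploy-broker"
# from the assumed install path and raises CalledProcessError on failure.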
Code example #3
File: uninstall.py  Project: sidhant-agrawal/ocs-ci
def uninstall_ocs():
    """
    Uninstall the OCS operator from an OpenShift cluster and remove
    all of its settings and dependencies

    """
    ocp_obj = ocp.OCP()

    log.info("deleting volume snapshots")
    vs_ocp_obj = ocp.OCP(kind=constants.VOLUMESNAPSHOT)
    vs_list = vs_ocp_obj.get(all_namespaces=True)["items"]
    for vs in vs_list:
        vs_obj = ocp.OCP(kind=constants.VOLUMESNAPSHOT,
                         namespace=vs.get("metadata").get("namespace"))
        vs_obj.delete(resource_name=vs.get("metadata").get("name"))

    log.info("queering for OCS PVCs")
    provisioners = constants.OCS_PROVISIONERS
    sc_list = [
        sc for sc in get_all_storageclass()
        if sc.get("provisioner") in provisioners
    ]

    pvc_to_delete = []
    for sc in sc_list:
        pvc_to_delete.extend(pvc for pvc in get_all_pvcs_in_storageclass(
            sc.get("metadata").get("name")) if "noobaa" not in pvc.name)

    if config.ENV_DATA["platform"].lower() == constants.ROSA_PLATFORM:
        log.info("Deleting OCS PVCs")
        for pvc in pvc_to_delete:
            log.info(f"Deleting PVC: {pvc.name}")
            pvc.delete()
        rosa.delete_odf_addon(config.ENV_DATA["cluster_name"])
        return None
    log.info("Removing monitoring stack from OpenShift Container Storage")
    remove_monitoring_stack_from_ocs()

    log.info(
        "Removing OpenShift Container Platform registry from OpenShift Container Storage"
    )
    remove_ocp_registry_from_ocs(config.ENV_DATA["platform"])

    log.info(
        "Removing the cluster logging operator from OpenShift Container Storage"
    )
    try:
        remove_cluster_logging_operator_from_ocs()
    except CommandFailed:
        log.info("No cluster logging found")

    log.info("Deleting OCS PVCs")
    for pvc in pvc_to_delete:
        log.info(f"Deleting PVC: {pvc.name}")
        pvc.delete()

    storage_cluster = ocp.OCP(
        kind=constants.STORAGECLUSTER,
        resource_name=constants.DEFAULT_CLUSTERNAME,
        namespace="openshift-storage",
    )

    log.info("Checking for local storage")
    lso_sc = None
    if check_local_volume_local_volume_set():
        "Local volume was found. Will be removed later"
        lso_sc = (storage_cluster.get().get("spec").get("storageDeviceSets")[0]
                  .get("dataPVCTemplate").get("spec").get("storageClassName"))

    cleanup_policy = (storage_cluster.get().get("metadata").get(
        "annotations").get("uninstall.ocs.openshift.io/cleanup-policy"))

    log.info("Deleting storageCluster object")
    storage_cluster.delete(resource_name=constants.DEFAULT_CLUSTERNAME)

    if cleanup_policy == "delete":
        log.info("Cleanup policy set to delete. checking cleanup pods")
        cleanup_pods = [
            pod for pod in get_all_pods() if "cluster-cleanup-job" in pod.name
        ]
        for pod in cleanup_pods:
            # Poll the cleanup pod's phase (up to 10 minutes, re-checking
            # every 30 seconds) until it reports Succeeded
            for phase in TimeoutSampler(
                timeout=600, sleep=30,
                func=lambda: pod.get().get("status").get("phase"),
            ):
                if phase == "Succeeded":
                    break
                log.info(f"waiting for cleanup pod {pod.name} to complete")
            log.info(f"Cleanup pod {pod.name} completed successfully")
        # No need to confirm that /var/lib/rook was deleted from the nodes if all cleanup pods completed.
    else:
        log.info("Cleanup policy set to retain. skipping nodes cleanup")

    log.info("Deleting openshift-storage namespace")
    ocp_obj.delete_project(constants.OPENSHIFT_STORAGE_NAMESPACE)
    ocp_obj.wait_for_delete(constants.OPENSHIFT_STORAGE_NAMESPACE)
    switch_to_project(constants.DEFAULT_NAMESPACE)

    # step 10: TODO remove crypto from nodes.
    """for node in storage_node_list:
        log.info(f"removing encryption from {node}")
        ocp_obj.exec_oc_debug_cmd(node=node, cmd_list=[])"""

    if lso_sc is not None:
        log.info("Removing LSO")
        try:
            uninstall_lso(lso_sc)
        except Exception as e:
            log.info(f"LSO removal failed.{e}")

    log.info("deleting noobaa storage class")
    noobaa_sc = ocp.OCP(kind=constants.STORAGECLASS)
    noobaa_sc.delete(resource_name=constants.NOOBAA_SC)

    nodes = get_all_nodes()
    node_objs = get_node_objs(nodes)

    log.info("Unlabeling storage nodes")
    label_nodes(nodes=node_objs,
                label=constants.OPERATOR_NODE_LABEL[:-3] + "-")
    label_nodes(nodes=node_objs, label=constants.TOPOLOGY_ROOK_LABEL + "-")

    log.info("Removing taints from storage nodes")
    taint_nodes(nodes=nodes, taint_label=constants.OPERATOR_NODE_TAINT + "-")

    log.info("Deleting remaining OCS PVs (if there are any)")
    try:
        rbd_pv = ocp.OCP(kind=constants.PV,
                         resource_name="ocs-storagecluster-ceph-rbd")
        fs_pv = ocp.OCP(kind=constants.PV,
                        resource_name="ocs-storagecluster-cephfs")
        rbd_pv.delete()
        fs_pv.delete()
        log.info("OCS PVs deleted")
    except Exception as e:
        log.info(f"OCS PV(s) not found. {e}")

    log.info("Removing CRDs")
    crd_list = [
        "backingstores.noobaa.io",
        "bucketclasses.noobaa.io",
        "cephblockpools.ceph.rook.io",
        "cephclusters.ceph.rook.io",
        "cephfilesystems.ceph.rook.io",
        "cephnfses.ceph.rook.io",
        "cephobjectstores.ceph.rook.io",
        "cephobjectstoreusers.ceph.rook.io",
        "noobaas.noobaa.io",
        "ocsinitializations.ocs.openshift.io",
        "storageclusters.ocs.openshift.io",
        "cephclients.ceph.rook.io",
        "cephobjectrealms.ceph.rook.io",
        "cephobjectzonegroups.ceph.rook.io",
        "cephobjectzones.ceph.rook.io",
        "cephrbdmirrors.ceph.rook.io",
    ]

    for crd in crd_list:
        try:
            ocp_obj.exec_oc_cmd(f"delete crd {crd} --timeout=300m")
        except Exception:
            log.info(f"crd {crd} was not found")