Example #1
    def patch_sleep_on_mds(self):
        """
        Patch the MDS deployments to sleep and remove the `livenessProbe` parameter.

        """
        mds_dep = get_deployments_having_label(
            label=constants.MDS_APP_LABEL,
            namespace=constants.OPENSHIFT_STORAGE_NAMESPACE,
        )
        mds_deployments = [OCS(**mds) for mds in mds_dep]
        for mds in mds_deployments:
            logger.info(
                f"Patching MDS: {mds.name} to remove livenessProbe and setting sleep infinity"
            )
            params = '[{"op":"remove", "path":"/spec/template/spec/containers/0/livenessProbe"}]'
            self.dep_ocp.patch(
                resource_name=mds.name,
                params=params,
                format_type="json",
            )
            params = (
                '{"spec": {"template": {"spec": {"containers": '
                '[{"name": "mds", "command": ["sleep", "infinity"], "args": []}]}}}}'
            )
            self.dep_ocp.patch(
                resource_name=mds.name,
                params=params,
            )
        logger.info("Sleeping for 60s and waiting for MDS pods to reach running state")
        time.sleep(60)
        for mds in get_mds_pods(namespace=constants.OPENSHIFT_STORAGE_NAMESPACE):
            wait_for_resource_state(resource=mds, state=constants.STATUS_RUNNING)
Example #2
    def patch_sleep_on_osds(self):
        """
        Patch the OSD deployments to sleep and remove the `livenessProbe` parameter.

        """
        osd_dep = get_deployments_having_label(
            label=constants.OSD_APP_LABEL,
            namespace=constants.OPENSHIFT_STORAGE_NAMESPACE,
        )
        osd_deployments = [OCS(**osd) for osd in osd_dep]
        for osd in osd_deployments:
            logger.info(
                f"Patching OSD: {osd.name} with livenessProbe and sleep infinity"
            )
            params = '[{"op":"remove", "path":"/spec/template/spec/containers/0/livenessProbe"}]'
            self.dep_ocp.patch(
                resource_name=osd.name,
                params=params,
                format_type="json",
            )
            params = (
                '{"spec": {"template": {"spec": {"containers": [{"name": "osd", "command":'
                ' ["sleep", "infinity"], "args": []}]}}}}'
            )
            self.dep_ocp.patch(
                resource_name=osd.name,
                params=params,
            )
        logger.info(
            "Sleeping for 15 seconds and waiting for OSDs to reach running state"
        )
        time.sleep(15)
        for osd in get_osd_pods():
            wait_for_resource_state(resource=osd, state=constants.STATUS_RUNNING)
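Examples #1 and #2 are identical apart from the app label, the container name, and the pods that are waited on afterwards: both first apply a JSON patch that removes the livenessProbe from the first container, then a strategic-merge patch that swaps the container command for `sleep infinity`. As a rough illustration of that shared pattern, the sketch below folds the two patches into one helper; the helper name and signature are assumptions made for this page and are not part of ocs-ci, while OCS, constants, get_deployments_having_label and self.dep_ocp are the same objects used in the examples above. json.dumps is used instead of the hand-built merge-patch string simply to avoid brace-escaping mistakes.

    # assumes "import json" at module level, next to the imports the examples
    # above already rely on (OCS, constants, get_deployments_having_label)
    def patch_sleep_on_labeled_deployments(self, app_label, container_name):
        """
        Hypothetical helper: drop the livenessProbe and set `sleep infinity`
        on every deployment matching the given app label.
        """
        deployments = [
            OCS(**dep)
            for dep in get_deployments_having_label(
                label=app_label,
                namespace=constants.OPENSHIFT_STORAGE_NAMESPACE,
            )
        ]
        for dep in deployments:
            logger.info(
                f"Patching {dep.name} to remove livenessProbe and set sleep infinity"
            )
            # JSON patch: remove the livenessProbe from the first container
            self.dep_ocp.patch(
                resource_name=dep.name,
                params='[{"op":"remove", "path":"/spec/template/spec/containers/0/livenessProbe"}]',
                format_type="json",
            )
            # Strategic-merge patch: replace the container command with sleep infinity
            merge_patch = {
                "spec": {
                    "template": {
                        "spec": {
                            "containers": [
                                {
                                    "name": container_name,
                                    "command": ["sleep", "infinity"],
                                    "args": [],
                                }
                            ]
                        }
                    }
                }
            }
            self.dep_ocp.patch(resource_name=dep.name, params=json.dumps(merge_patch))

With a helper like this, Example #1 would reduce to self.patch_sleep_on_labeled_deployments(constants.MDS_APP_LABEL, "mds") plus its existing sleep-and-wait block, and Example #2 to the same call with constants.OSD_APP_LABEL and "osd".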
Example #3
    def deployments_to_revert(self):
        """
        Gets the osd, mon, mgr and mds deployments to revert

        Returns:
            tuple: (backup YAML paths of the osd/mon/mgr deployments,
                backup YAML paths of the mds deployments)

        """
        to_revert_patches = (
            get_deployments_having_label(
                label=constants.OSD_APP_LABEL,
                namespace=constants.OPENSHIFT_STORAGE_NAMESPACE,
            )
            + get_deployments_having_label(
                label=constants.MON_APP_LABEL,
                namespace=constants.OPENSHIFT_STORAGE_NAMESPACE,
            )
            + get_deployments_having_label(
                label=constants.MGR_APP_LABEL,
                namespace=constants.OPENSHIFT_STORAGE_NAMESPACE,
            )
        )
        to_revert_mds = get_deployments_having_label(
            label=constants.MDS_APP_LABEL,
            namespace=constants.OPENSHIFT_STORAGE_NAMESPACE,
        )
        to_revert_patches_path = []
        to_revert_mds_path = []
        for dep in to_revert_patches:
            to_revert_patches_path.append(
                join(self.backup_dir, dep["metadata"]["name"] + ".yaml")
            )
        for dep in to_revert_mds:
            logger.info(dep)
            to_revert_mds_path.append(
                join(self.backup_dir, dep["metadata"]["name"] + ".yaml")
            )
        return to_revert_patches_path, to_revert_mds_path
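The paths returned here point at YAML backups of the original deployment specs, saved earlier into self.backup_dir by code outside this snippet. Below is a minimal sketch of how a revert could consume them, assuming the hypothetical method name and the same self.dep_ocp wrapper used in the other examples (oc replace --force recreates each deployment from its backed-up YAML):

    def revert_patches(self, to_revert_paths):
        """
        Hypothetical sketch: re-apply the backed-up deployment YAMLs to undo
        the sleep/livenessProbe patches.
        """
        for path in to_revert_paths:
            logger.info(f"Reverting deployment from backup: {path}")
            # Delete and recreate the deployment from the saved YAML,
            # discarding the patched spec.
            self.dep_ocp.exec_oc_cmd(f"replace --force -f {path}")

A caller would unpack the tuple first, e.g. patches_paths, mds_paths = self.deployments_to_revert(), and pass each list to the revert in whatever order the recovery flow requires.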
Example #4
def update_mon_initial_delay():
    """
    Inserts delay on all monitors

    """
    mon_dep = get_deployments_having_label(
        label=constants.MON_APP_LABEL,
        namespace=constants.OPENSHIFT_STORAGE_NAMESPACE,
    )
    mon_deployments = [OCS(**mon) for mon in mon_dep]
    for mon in mon_deployments:
        logger.info(f"Updating initialDelaySeconds on {mon.name} deployment")
        insert_delay(mon_dep=mon.name)

    logger.info("Sleeping for mons to get initialized")
    time.sleep(90)
    logger.info("Validating whether all mons reached running state")
    validate_mon_pods()
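insert_delay and validate_mon_pods are helpers from the surrounding module and are not shown in this snippet; validate_mon_pods presumably just waits for the mon pods to reach Running. Purely as an illustration of what insert_delay might do (the real ocs-ci implementation may differ), the sketch below raises initialDelaySeconds on the mon container's livenessProbe using the same OCP patch API seen in the other examples; the 2000-second value is an arbitrary assumption, and the patch requires that the livenessProbe is still present on the deployment.

def insert_delay(mon_dep):
    """
    Hypothetical sketch: give the mon container more time before its first
    liveness check by raising initialDelaySeconds on its livenessProbe.
    """
    dep_ocp = ocp.OCP(
        kind=constants.DEPLOYMENT,
        namespace=constants.OPENSHIFT_STORAGE_NAMESPACE,
    )
    params = (
        '[{"op": "replace", '
        '"path": "/spec/template/spec/containers/0/livenessProbe/initialDelaySeconds", '
        '"value": 2000}]'
    )
    dep_ocp.patch(resource_name=mon_dep, params=params, format_type="json")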
Example #5
    def patch_sleep_on_mon(self):
        """
        Patches sleep to infinity on monitors

        """
        mon_dep = get_deployments_having_label(
            label=constants.MON_APP_LABEL,
            namespace=constants.OPENSHIFT_STORAGE_NAMESPACE,
        )
        mon_deployments = [OCS(**mon) for mon in mon_dep]
        for mon in mon_deployments:
            params = (
                '{"spec": {"template": {"spec": {"containers":'
                ' [{"name": "mon", "command": ["sleep", "infinity"], "args": []}]}}}}'
            )
            logger.info(f"Patching monitor: {mon.name} to sleep infinitely")
            self.dep_ocp.patch(
                resource_name=mon.name,
                params=params,
            )
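Unlike Examples #1 and #2, this helper does not wait for the patched mon pods afterwards. If a wait is wanted, a follow-up along the lines below would mirror the other examples; it assumes a get_mon_pods helper that, like get_mds_pods above, accepts a namespace argument:

        # Possible follow-up inside patch_sleep_on_mon, mirroring Examples #1 and #2
        logger.info("Sleeping for 60s and waiting for mon pods to reach running state")
        time.sleep(60)
        for mon in get_mon_pods(namespace=constants.OPENSHIFT_STORAGE_NAMESPACE):
            wait_for_resource_state(resource=mon, state=constants.STATUS_RUNNING)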
Example #6
    def test_scale_down_rgw(self, scale_down_to):
        """
        Scale down RGW deployment and do sanity validations

        - Scale down the RGW deployment replicas to 1 or 0
        - If scaled down to 1, check Noobaa health
        - Scale up the RGW replicas back to 2
        - Check Noobaa health

        """
        rgw_deployment = pod.get_deployments_having_label(
            constants.RGW_APP_LABEL, defaults.ROOK_CLUSTER_NAMESPACE)[0]
        rgw_deployment = OCS(**rgw_deployment)

        current_replicas = rgw_deployment.get()["spec"]["replicas"]
        rgw_deployment.ocp.exec_oc_cmd(
            f"scale --replicas={str(scale_down_to)} deployment/{rgw_deployment.name}"
        )
        if scale_down_to > 0:
            self.cl_obj.wait_for_noobaa_health_ok()
        rgw_deployment.ocp.exec_oc_cmd(
            f"scale --replicas={str(current_replicas)} deployment/{rgw_deployment.name}"
        )
        self.cl_obj.wait_for_noobaa_health_ok()
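scale_down_to is supplied by pytest rather than defined in the snippet. One plausible way to drive the test, shown only as an assumption about how the parameter arrives (the real test module may use different marks or values), is a parametrize mark over the two documented replica counts:

    # Hypothetical parametrization; assumes pytest is imported at module level.
    @pytest.mark.parametrize("scale_down_to", [0, 1])
    def test_scale_down_rgw(self, scale_down_to):
        ...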
Example #7
    def test_restart_mgr_while_two_mons_down(self, pvc_factory, pod_factory,
                                             bucket_factory,
                                             rgw_bucket_factory):
        """
        Test Procedure:
        1. Scale down two mons:
        oc scale --replicas=0 deploy/rook-ceph-mon-a
        oc scale --replicas=0 deploy/rook-ceph-mon-b

        2. Restart the mgr:
        oc delete pod -l app=rook-ceph-mgr

        3. Sleep for 5 seconds

        4. Scale the mons back up:
        oc scale --replicas=1 deploy/rook-ceph-mon-a
        oc scale --replicas=1 deploy/rook-ceph-mon-b

        5. Sleep for 10 seconds

        6. Wait for the mgr pod to reach Running state:
        oc get pod -l app=rook-ceph-mgr

        """
        self.oc = ocp.OCP(kind=constants.DEPLOYMENT,
                          namespace=config.ENV_DATA["cluster_namespace"])
        mons = [
            mon["metadata"]["name"] for mon in get_deployments_having_label(
                constants.MON_APP_LABEL, defaults.ROOK_CLUSTER_NAMESPACE)
        ]
        self.mons_scale = mons[0:2]
        tries = 11
        for index in range(1, tries):
            log.info(f"Scaling down two mons {self.mons_scale}, index={index}")
            for mon_scale in self.mons_scale:
                self.oc.exec_oc_cmd(
                    f"scale --replicas=0 deployment/{mon_scale}")

            log.info(f"Restarting mgr pod, index={index}")
            mgr_pod = get_mgr_pods()
            mgr_pod[0].delete(wait=True)

            time.sleep(5)

            log.info(f"Scaling up two mons {self.mons_scale}, index={index}")
            for mon_scale in self.mons_scale:
                self.oc.exec_oc_cmd(
                    f"scale --replicas=1 deployment/{mon_scale}")

            time.sleep(10)

            log.info(
                f"Waiting for mgr pod to reach Running state, index={index}")
            mgr_pod_obj = ocp.OCP(kind=constants.POD,
                                  namespace=defaults.ROOK_CLUSTER_NAMESPACE)
            assert mgr_pod_obj.wait_for_resource(
                condition=constants.STATUS_RUNNING,
                selector=constants.MGR_APP_LABEL,
                resource_count=1,
                timeout=100,
            ), f"Mgr pod did'nt move to Running state after 100 seconds, index={index}"

        log.info("Creating Resources using sanity helpers")
        self.sanity_helpers.create_resources(pvc_factory, pod_factory,
                                             bucket_factory,
                                             rgw_bucket_factory)
        log.info("Deleting Resources using sanity helpers")
        self.sanity_helpers.delete_resources()