def test_monitoring_shutdown_mgr_pod(self, pods):
        """
        Montoring backed by OCS, bring mgr down(replica: 0) for some time
        and check ceph related metrics
        """
        # Check that ceph metrics are available before the mgr is scaled down
        assert (
            check_ceph_metrics_available()
        ), "failed to get results for some metrics before downscaling the mgr deployment to 0"

        # Get the mgr deployment name and the corresponding mgr pod name
        oc_deployment = ocp.OCP(kind=constants.DEPLOYMENT,
                                namespace=ROOK_CLUSTER_NAMESPACE)
        mgr_deployments = oc_deployment.get(
            selector=constants.MGR_APP_LABEL)["items"]
        mgr = mgr_deployments[0]["metadata"]["name"]
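        # get_pod_name_by_pattern() returns a list of pod names matching the
        # mgr deployment name; only the first mgr deployment is used here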
        pod_mgr_name = get_pod_name_by_pattern(
            pattern=mgr, namespace=ROOK_CLUSTER_NAMESPACE)

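        # Scale the mgr deployment to zero replicas so the ceph-mgr prometheus
        # module stops serving ceph metrics while the pod is down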
        log.info(f"Downscaling deployment {mgr} to 0")
        oc_deployment.exec_oc_cmd(f"scale --replicas=0 deployment/{mgr}")

        log.info(f"Wait for a mgr pod {pod_mgr_name[0]} to be deleted")
        oc_pod = ocp.OCP(kind=constants.POD, namespace=ROOK_CLUSTER_NAMESPACE)
        oc_pod.wait_for_delete(resource_name=pod_mgr_name[0])

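        # Bring the mgr back up so ceph metrics collection can resume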
        log.info(f"Upscaling deployment {mgr} back to 1")
        oc_deployment.exec_oc_cmd(f"scale --replicas=1 deployment/{mgr}")

        log.info("Waiting for mgr pod to be reach Running state")
        oc_pod.wait_for_resource(condition=constants.STATUS_RUNNING,
                                 selector=constants.MGR_APP_LABEL)

        # Check that ceph metrics are available again after the mgr is back up
        check_ceph_metrics_available_within_time()


def check_ceph_metrics_available_within_time():
    """Assert that ceph metrics are available again after the mgr restart."""
    assert check_ceph_metrics_available(), (
        "failed to get results for some metrics after downscaling and "
        "upscaling the mgr deployment"
    )