import logging

from ocs_ci.framework import config
from ocs_ci.framework.testlib import deployment, polarion_id
from ocs_ci.ocs.resources.storage_cluster import (
    ocs_install_verification,
    mcg_only_install_verification,
)
from ocs_ci.ocs.utils import get_non_acm_cluster_config
from ocs_ci.utility.reporting import get_polarion_id
from ocs_ci.utility.utils import is_cluster_running, ceph_health_check
from ocs_ci.helpers.sanity_helpers import Sanity, SanityExternalCluster

log = logging.getLogger(__name__)


@deployment
@polarion_id(get_polarion_id())
def test_deployment(pvc_factory, pod_factory):
    """
    Verify the OCP cluster is running and, unless OCS deployment is
    skipped, run a sanity check on every non-ACM cluster.
    """
    deploy = config.RUN["cli_params"].get("deploy")
    teardown = config.RUN["cli_params"].get("teardown")
    if not teardown or deploy:
        log.info("Verifying OCP cluster is running")
        assert is_cluster_running(config.ENV_DATA["cluster_path"])
    if not config.ENV_DATA["skip_ocs_deployment"]:
        if config.multicluster:
            # Remember the current context so it can be restored after
            # iterating over the managed (non-ACM) clusters
            restore_ctx_index = config.cur_index
            for cluster in get_non_acm_cluster_config():
                config.switch_ctx(cluster.MULTICLUSTER["multicluster_index"])
                log.info(
                    f"Sanity check for cluster: {cluster.ENV_DATA['cluster_name']}"
                )
                sanity_helpers = Sanity()
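# ---------------------------------------------------------------------------
# A minimal sketch (an illustration, not this file's actual continuation) of
# how the verification helpers imported above are typically combined on a
# single, non-multicluster run. The "mcg_only_deployment" key and the
# no-argument calls are assumptions; the sketch reuses the imports at the
# top of this file.
# ---------------------------------------------------------------------------
def _single_cluster_verification_sketch():
    if config.ENV_DATA.get("mcg_only_deployment"):
        # MCG-only deployments skip the Ceph-backed verification
        mcg_only_install_verification()
    else:
        # Verify the full OCS/ODF install: operators, pods, storage classes
        ocs_install_verification()
    # Confirm Ceph reports healthy before declaring the deployment good
    ceph_health_check(namespace=config.ENV_DATA["cluster_namespace"])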
import logging

import pytest

# Imports below follow ocs-ci's module layout for these helpers
from ocs_ci.framework.testlib import ocs_upgrade, polarion_id
from ocs_ci.ocs.disruptive_operations import osd_node_reboot, worker_node_shutdown
from ocs_ci.ocs.ocs_upgrade import run_ocs_upgrade
from ocs_ci.utility.reporting import get_polarion_id

log = logging.getLogger(__name__)


def test_worker_node_permanent_shutdown(teardown):
    """
    Test OCS upgrade with the disruption of shutting down a worker node
    """
    log.info("Starting disruptive function: test_worker_node_permanent_shutdown")
    run_ocs_upgrade(operation=worker_node_shutdown, abrupt=False)


@pytest.mark.polarion_id("OCS-1558")
def test_osd_reboot(teardown):
    """
    OCS upgrade with node reboot: one OSD going down and coming back up
    while the upgrade is running
    """
    log.info("Starting disruptive function: test_osd_reboot")
    run_ocs_upgrade(operation=osd_node_reboot)


@ocs_upgrade
@polarion_id(get_polarion_id(upgrade=True))
def test_upgrade():
    """
    Test the upgrade procedure of an OCS cluster
    """
    run_ocs_upgrade()
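# ---------------------------------------------------------------------------
# The disruptive variants above share one pattern: run_ocs_upgrade() takes an
# `operation` callable and forwards the remaining keyword arguments to it
# while the upgrade runs (as with abrupt=False above). A minimal sketch of
# plugging in a custom disruption under that assumption; the function name
# and body below are hypothetical and reuse the imports at the top of this
# file.
# ---------------------------------------------------------------------------
def custom_disruption(message="hypothetical disruption"):
    """Hypothetical disruption injected while the upgrade is in flight."""
    log.info("Running custom disruption: %s", message)


def test_upgrade_with_custom_disruption(teardown):
    """
    Sketch: OCS upgrade with the hypothetical disruption defined above.
    """
    log.info("Starting disruptive function: test_upgrade_with_custom_disruption")
    run_ocs_upgrade(operation=custom_disruption, message="hypothetical disruption")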