    def test_respin_elasticsearch_pod(self, create_pvc_and_deploymentconfig_pod):
        """
        Test to verify that a respin of the elasticsearch pod has no
        functional impact on logging backed by OCS.
        """

        elasticsearch_pod_obj = self.get_elasticsearch_pod_obj()

        # Respin the elastic-search pod
        elasticsearch_pod_obj.delete(force=True)

        # Checks the health of logging cluster after a respin
        assert ocp_logging_obj.check_health_of_clusterlogging()

        # Checks .operations index
        es_pod_obj = self.get_elasticsearch_pod_obj()

        operations_index = es_pod_obj.exec_cmd_on_pod(
            command='es_util --query=.operations.*/_search?pretty', out_yaml_format=True
        )
        assert operations_index['_shards']['failed'] == 0, (
            "Unable to access the logs of .operations from ES pods"
        )

        # Creates new-project and app-pod and checks the logs are retained
        pod_obj, pvc_obj = create_pvc_and_deploymentconfig_pod

        self.validate_project_exists(pvc_obj)
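
The test above leans on two helpers that are not part of this snippet, get_elasticsearch_pod_obj and validate_project_exists. A minimal sketch of what they might look like, reusing ocs-ci's get_all_pods helper (seen in check_cluster_logging further down) and the exec_cmd_on_pod pattern from the tests; the label selector and the es_util query are assumptions, not the actual ocs-ci implementation:

    def get_elasticsearch_pod_obj(self):
        # Return the first elasticsearch pod running in the openshift-logging
        # project; the selector value is an assumption
        pod_objs = get_all_pods(
            namespace=constants.OPENSHIFT_LOGGING_NAMESPACE,
            selector=['elasticsearch']
        )
        return pod_objs[0]

    def validate_project_exists(self, pvc_obj):
        # Query Elasticsearch for an index named after the project's namespace
        # and make sure the query did not fail on any shard
        project = pvc_obj.project.namespace
        es_pod_obj = self.get_elasticsearch_pod_obj()
        response = es_pod_obj.exec_cmd_on_pod(
            command=f'es_util --query=project.{project}.*/_count',
            out_yaml_format=True
        )
        assert response['_shards']['failed'] == 0, (
            f"Project {project} not found in the EFK stack"
        )
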
def test_fixture(request):
    """
    Setup and teardown
    * The setup will deploy openshift-logging in the cluster
    * The teardown will uninstall cluster-logging from the cluster
    """

    def finalizer():
        teardown(cbp_obj, sc_obj)

    request.addfinalizer(finalizer)

    # Deploys elastic-search operator on the project openshift-operators-redhat
    ocp_logging_obj.create_namespace(yaml_file=constants.EO_NAMESPACE_YAML)
    assert ocp_logging_obj.create_elasticsearch_operator_group(
        yaml_file=constants.EO_OG_YAML,
        resource_name='openshift-operators-redhat'
    )
    assert ocp_logging_obj.set_rbac(
        yaml_file=constants.EO_RBAC_YAML, resource_name='prometheus-k8s'
    )
    assert ocp_logging_obj.create_elasticsearch_subscription(constants.EO_SUB_YAML)

    # Deploys cluster-logging operator on the project openshift-logging
    ocp_logging_obj.create_namespace(yaml_file=constants.CL_NAMESPACE_YAML)
    assert ocp_logging_obj.create_clusterlogging_operator_group(
        yaml_file=constants.CL_OG_YAML
    )
    assert ocp_logging_obj.create_clusterlogging_subscription(
        yaml_file=constants.CL_SUB_YAML
    )

    # Creates storage class
    cbp_obj = helpers.create_ceph_block_pool()
    sc_obj = helpers.create_storage_class(
        interface_type=constants.CEPHBLOCKPOOL,
        interface_name=cbp_obj.name,
        secret_name=constants.DEFAULT_SECRET,
        reclaim_policy="Delete"
    )
    assert ocp_logging_obj.create_instance_in_clusterlogging(sc_name=sc_obj.name)

    # Check the health of the cluster-logging
    assert ocp_logging_obj.check_health_of_clusterlogging()

    csv_obj = CSV(
        kind=constants.CLUSTER_SERVICE_VERSION, namespace=constants.OPENSHIFT_LOGGING_NAMESPACE
    )

    get_version = csv_obj.get(out_yaml_format=True)
    for item in get_version['items']:
        if '4.2.0' in item['metadata']['name']:
            logger.info("The version of operators is 4.2.0")
            logger.info(item['metadata']['name'])
        else:
            logger.error("The version is not 4.2.0")
    def test_respin_osd_pods_to_verify_logging(self, create_pvc_and_deploymentconfig_pod):
        """
        This test creates projects before and after a respin of an OSD pod
        and verifies project existence in the EFK stack.
        1. Creates new project with PVC and app-pods
        2. Respins osd
        3. Logs into the EFK stack and checks for the health of cluster-logging
        4. Logs into the EFK stack and checks project existence
        5. Checks for the shards of the project in the EFK stack
        6. Creates new project and checks the existence again
        """

        # Create 1st project and app_pod
        dc_pod_obj, dc_pvc_obj = create_pvc_and_deploymentconfig_pod

        project1 = dc_pvc_obj.project.namespace

        # Delete the OSD pod
        disruption = disruption_helpers.Disruptions()
        disruption.set_resource(resource='osd')
        disruption.delete_resource()

        # Check the health of the cluster-logging
        assert ocp_logging_obj.check_health_of_clusterlogging()

        # Check for the 1st project created in EFK stack before the respin
        self.validate_project_exists(dc_pvc_obj)

        # Check the files in the project
        elasticsearch_pod_obj = self.get_elasticsearch_pod_obj()

        project1_filecount = elasticsearch_pod_obj.exec_cmd_on_pod(
            command=f'es_util --query=project.{project1}.*/_count',
            out_yaml_format=True
        )
        assert project1_filecount['_shards']['successful'] != 0, (
            f"No files found in project {project1}"
        )
        logger.info(f'Total number of files in project 1: {project1_filecount}')

        # Create another app_pod in new project
        pod_obj, pvc_obj = create_pvc_and_deploymentconfig_pod

        project2 = pvc_obj.project.namespace

        # Check the 2nd project exists in the EFK stack
        self.validate_project_exists(pvc_obj)

        project2_filecount = elasticsearch_pod_obj.exec_cmd_on_pod(
            command=f'es_util --query=project.{project2}.*/_count', out_yaml_format=True
        )
        assert project2_filecount['_shards']['successful'] != 0, (
            f"No files found in project {project2}"
        )
        logger.info(f'Total number of files in project 2: {project2_filecount}')
def check_cluster_logging():
    """
    Few checks to assert for before and after upgrade
    1. Checks for pods in openshift-logging project
    2. Checks for the health of logging cluster before upgrade

    """

    assert get_all_pods(
        namespace=constants.OPENSHIFT_LOGGING_NAMESPACE
    ), "Some or all pods missing in namespace"

    assert check_health_of_clusterlogging(), "Cluster is not Healthy"
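
A minimal usage sketch: the docstring says these checks are meant to run on both sides of an upgrade, so a caller would typically wrap the upgrade step with them. run_upgrade below is only a placeholder for whatever performs the actual upgrade:

# Run the logging checks before and after the upgrade
check_cluster_logging()
run_upgrade()  # placeholder for the actual upgrade step
check_cluster_logging()
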
def create_instance():
    """
    The function is used to create instance for
    cluster-logging
    """

    # Create instance
    assert ocp_logging_obj.create_instance_in_clusterlogging()

    # Check the health of the cluster-logging
    assert ocp_logging_obj.check_health_of_clusterlogging()

    csv_obj = CSV(namespace=constants.OPENSHIFT_LOGGING_NAMESPACE)

    # Get the CSV installed
    get_csv = csv_obj.get(out_yaml_format=True)
    logger.info(f'The installed CSV is {get_csv}')
    def test_respin_osd_pods_to_verify_logging(
        self, create_pvc_and_deploymentconfig_pod
    ):
        """
        This test creates projects before and after a respin of an OSD pod
        and verifies project existence in the EFK stack.
        1. Creates new project with PVC and app-pods
        2. Respins osd
        3. Logs into the EFK stack and checks for the health of cluster-logging
        4. Logs into the EFK stack and checks project existence
        5. Checks for the shards of the project in the EFK stack
        6. Creates new project and checks the existence again
        """

        # Create 1st project and app_pod
        dc_pod_obj, dc_pvc_obj = create_pvc_and_deploymentconfig_pod

        project1 = dc_pvc_obj.project.namespace

        # Delete the OSD pod
        disruption = disruption_helpers.Disruptions()
        disruption.set_resource(resource="osd")
        disruption.delete_resource()

        # Check the health of the cluster-logging
        assert ocp_logging_obj.check_health_of_clusterlogging()

        # Check for the 1st project created in EFK stack before the respin
        self.validate_project_exists(project1)

        # Check the files in the project
        self.check_filecount_in_project(project1)

        # Create another app_pod in new project
        pod_obj, pvc_obj = create_pvc_and_deploymentconfig_pod

        project2 = pvc_obj.project.namespace

        # Check the 2nd project exists in the EFK stack
        self.validate_project_exists(project2)

        self.check_filecount_in_project(project2)
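
This variant of the test delegates the shard check to check_filecount_in_project, which is not shown here. A plausible sketch of that helper, mirroring the explicit es_util query used in the earlier variant of the test; treat it as an illustration rather than the actual ocs-ci code:

    def check_filecount_in_project(self, project):
        # Count the documents indexed for the project and make sure the query
        # succeeded on at least one shard
        elasticsearch_pod_obj = self.get_elasticsearch_pod_obj()
        filecount = elasticsearch_pod_obj.exec_cmd_on_pod(
            command=f'es_util --query=project.{project}.*/_count',
            out_yaml_format=True
        )
        assert filecount['_shards']['successful'] != 0, (
            f"No files found in project {project}"
        )
        logger.info(f'Total number of files in project {project}: {filecount}')
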
def test_fixture(request):
    """
    Setup and teardown
    * The setup will deploy openshift-logging in the cluster
    * The teardown will uninstall cluster-logging from the cluster
    """
    def finalizer():
        teardown(cbp_obj, sc_obj)

    request.addfinalizer(finalizer)

    # Deploys elastic-search operator on the project openshift-operators-redhat
    ocp_logging_obj.create_namespace(yaml_file=constants.EO_NAMESPACE_YAML)
    assert ocp_logging_obj.create_elasticsearch_operator_group(
        yaml_file=constants.EO_OG_YAML,
        resource_name='openshift-operators-redhat')
    assert ocp_logging_obj.set_rbac(yaml_file=constants.EO_RBAC_YAML,
                                    resource_name='prometheus-k8s')
    assert ocp_logging_obj.create_elasticsearch_subscription(
        constants.EO_SUB_YAML)

    # Deploys cluster-logging operator on the project openshift-logging
    ocp_logging_obj.create_namespace(yaml_file=constants.CL_NAMESPACE_YAML)
    assert ocp_logging_obj.create_clusterlogging_operator_group(
        yaml_file=constants.CL_OG_YAML)
    assert ocp_logging_obj.create_clusterlogging_subscription(
        yaml_file=constants.CL_SUB_YAML)

    # Creates storage class
    cbp_obj = helpers.create_ceph_block_pool()
    sc_obj = helpers.create_storage_class(
        interface_type=constants.CEPHBLOCKPOOL,
        interface_name=cbp_obj.name,
        secret_name=constants.DEFAULT_SECRET,
        reclaim_policy="Delete")
    assert ocp_logging_obj.create_instance_in_clusterlogging(
        sc_name=sc_obj.name)

    # Check the health of the cluster-logging
    assert ocp_logging_obj.check_health_of_clusterlogging()
    def test_respin_elasticsearch_pod(self, create_pvc_and_deploymentconfig_pod):
        """
        Test to verify that a respin of the elasticsearch pod has no
        functional impact on logging backed by OCS.
        """

        elasticsearch_pod_obj = self.get_elasticsearch_pod_obj()

        # Respin the elastic-search pod
        elasticsearch_pod_obj.delete(force=True)

        # Checks the health of logging cluster after a respin
        assert ocp_logging_obj.check_health_of_clusterlogging()

        # Checks openshift-storage project exists and get filecount
        self.check_filecount_in_project(project=defaults.ROOK_CLUSTER_NAMESPACE)

        # Creates new-project and app-pod and checks the logs are retained
        pod_obj, pvc_obj = create_pvc_and_deploymentconfig_pod

        project = pvc_obj.project.namespace
        self.validate_project_exists(project)
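
All of these tests request the create_pvc_and_deploymentconfig_pod fixture, which is not included in the snippets above. A rough sketch of what such a fixture might do; the helper names, their parameters, the storage class constant and the project wiring are assumptions about ocs-ci's helpers module, not the real fixture definition:

@pytest.fixture()
def create_pvc_and_deploymentconfig_pod(request):
    def finalizer():
        # Clean up the app pod and the PVC created for the test
        pod_obj.delete()
        pvc_obj.delete()

    request.addfinalizer(finalizer)

    # New project whose logs will later be looked up in the EFK stack
    project_obj = helpers.create_project()

    # RBD backed PVC in that project; the storage class name is an assumption
    pvc_obj = helpers.create_pvc(
        sc_name=constants.DEFAULT_STORAGECLASS_RBD,
        namespace=project_obj.namespace
    )
    pvc_obj.project = project_obj  # the tests read pvc_obj.project.namespace

    # DeploymentConfig based app pod consuming the PVC (dc_deployment flag
    # is an assumption)
    pod_obj = helpers.create_pod(
        interface_type=constants.CEPHBLOCKPOOL,
        pvc_name=pvc_obj.name,
        namespace=project_obj.namespace,
        dc_deployment=True
    )
    return pod_obj, pvc_obj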