def test_drain_mcg_pod_node(self, node_drain_teardown,
                                reduce_and_resume_cluster_load, pod_to_drain):
        """
        Test drainage of nodes which contain NB resources

        """
        # Retrieve the relevant pod object
        pod_obj = pod.Pod(**pod.get_pods_having_label(
            label=self.labels_map[pod_to_drain],
            namespace=defaults.ROOK_CLUSTER_NAMESPACE,
        )[0])
        # Retrieve the node name on which the pod resides
        node_name = pod_obj.get()["spec"]["nodeName"]
        # Drain the node
        drain_nodes([node_name])
        # Verify the node was drained properly
        wait_for_nodes_status([node_name],
                              status=constants.NODE_READY_SCHEDULING_DISABLED)
        # Retrieve the new pod that should've been created post-drainage
        pod_obj = pod.Pod(**pod.get_pods_having_label(
            label=self.labels_map[pod_to_drain],
            namespace=defaults.ROOK_CLUSTER_NAMESPACE,
        )[0])
        # Verify that the new pod has reached a 'RUNNING' status again and recovered successfully
        wait_for_resource_state(pod_obj, constants.STATUS_RUNNING, timeout=120)
        # Check the NB status to verify the system is healthy
        self.cl_obj.wait_for_noobaa_health_ok()
Example #2
def get_registry_pod_obj():
    """
    Function to get registry pod obj

    Returns:
        pod_obj (list): List of Registry pod objs

    Raises:
        UnexpectedBehaviour: When image-registry pod is not present.

    """
    # Sometimes, when there is an update to the config CRD, there will be two
    # registry pods: the old pod is terminated and a new pod comes up based on
    # the new CRD, so the loop below waits until the old pod terminates
    wait_time = 30
    for iteration in range(10):
        pod_data = pod.get_pods_having_label(
            label='docker-registry=default',
            namespace=constants.OPENSHIFT_IMAGE_REGISTRY_NAMESPACE)
        pod_obj = [pod.Pod(**data) for data in pod_data]
        if len(pod_obj) == 1:
            break
        elif len(pod_obj) == 0:
            raise UnexpectedBehaviour("Image-registry pod not present")
        elif iteration > 5:
            raise UnexpectedBehaviour(
                "Waited for 3 minutes but the old image-registry pod has not terminated")
        else:
            logger.info(
                f"Waiting for 30 sec's for registry pod to be up iteration {iteration}"
            )
            time.sleep(wait_time)
    return pod_obj
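
A minimal usage sketch for the helper above (illustrative only; it reuses the same pod helpers shown throughout these examples):

registry_pods = get_registry_pod_obj()
for registry_pod in registry_pods:
    logger.info(
        f"Registry pod {registry_pod.name} runs on node "
        f"{registry_pod.get()['spec']['nodeName']}"
    )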
Example #3
def create_pod(desired_status=constants.STATUS_RUNNING, wait=True, **kwargs):
    """
    Create a pod

    Args:
        desired_status (str): The status of the pod to wait for
        wait (bool): True for waiting for the pod to reach the desired
            status, False otherwise
        **kwargs: The pod data yaml converted to dict

    Returns:
        Pod: A Pod instance

    Raises:
        AssertionError: In case of any failure
    """
    pod_obj = pod.Pod(**kwargs)
    pod_name = kwargs.get('metadata').get('name')
    created_resource = pod_obj.create()
    assert created_resource, (
        f"Failed to create resource {pod_name}"
    )
    if wait:
        assert pod_obj.ocp.wait_for_resource(
            condition=desired_status, resource_name=pod_name
        ), f"{pod_obj.kind} {pod_name} failed to reach"
        f"status {desired_status}"
    return pod_obj
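
A minimal call sketch for the variant above, assuming a pod template such as constants.CSI_RBD_POD_YAML is first loaded into a dict (the name override is hypothetical):

pod_dict = templating.load_yaml_to_dict(constants.CSI_RBD_POD_YAML)
pod_dict['metadata']['name'] = 'example-rbd-pod'  # hypothetical name
pod_obj = create_pod(desired_status=constants.STATUS_RUNNING, wait=True, **pod_dict)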
    def test_db_scc(self, teardown):
        """
        Test that the noobaa db pod is assigned the anyuid SCC after changing the default noobaa SCC

        """
        scc_name = constants.NOOBAA_DB_SERVICE_ACCOUNT_NAME
        service_account = constants.NOOBAA_DB_SERVICE_ACCOUNT
        pod_obj = pod.Pod(**pod.get_pods_having_label(
            label=self.labels_map["noobaa_db"],
            namespace=defaults.ROOK_CLUSTER_NAMESPACE,
        )[0])
        ocp_scc = ocp.OCP(kind=constants.SCC,
                          namespace=defaults.ROOK_CLUSTER_NAMESPACE)
        pod_data = pod_obj.get()

        log.info(f"Verifying current SCC is {scc_name} in db pod")
        assert (pod_data.get("metadata").get("annotations").get(
            "openshift.io/scc") == scc_name), "Invalid default scc"

        log.info("Deleting the user array from the Noobaa scc")
        ocp_scc.patch(
            resource_name=scc_name,
            params='[{"op": "remove", "path": "/users/0", '
            f'"value":{service_account}}}]',
            format_type="json",
        )
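        # For illustration: the params f-string above renders to a JSON patch of
        # the form [{"op": "remove", "path": "/users/0", "value":<service_account>}]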
        assert not helpers.validate_scc_policy(
            sa_name=scc_name,
            namespace=defaults.ROOK_CLUSTER_NAMESPACE,
            scc_name=scc_name,
        ), "SA name is  present in noobaa scc"
        log.info("Adding the noobaa system sa user to anyuid scc")
        ocp_scc.patch(
            resource_name=constants.ANYUID,
            params='[{"op": "add", "path": "/users/0", '
            f'"value":{service_account}}}]',
            format_type="json",
        )
        assert helpers.validate_scc_policy(
            sa_name=scc_name,
            namespace=defaults.ROOK_CLUSTER_NAMESPACE,
            scc_name=constants.ANYUID,
        ), "SA name is not present in anyuid scc"

        pod_obj.delete(force=True)
        # Verify that the new pod has reached a 'RUNNING' status
        assert pod_obj.ocp.wait_for_resource(
            condition=constants.STATUS_RUNNING,
            selector=self.labels_map["noobaa_db"],
            resource_count=1,
            timeout=300,
        ), "Noobaa pod did not reach running state"
        pod_data = pod_obj.get()
        log.info("Verifying SCC is now anyuid in the db pod")
        assert (pod_data.get("metadata").get("annotations").get(
            "openshift.io/scc") == constants.ANYUID), "Invalid scc"
        # Check the NB status to verify the system is healthy
        self.cl_obj.wait_for_noobaa_health_ok()
Example #5
def create_pod(interface_type=None,
               pvc_name=None,
               desired_status=constants.STATUS_RUNNING,
               wait=True,
               namespace=defaults.ROOK_CLUSTER_NAMESPACE,
               node_name=None,
               pod_dict_path=None):
    """
    Create a pod

    Args:
        interface_type (str): The interface type (CephFS, RBD, etc.)
        pvc_name (str): The PVC that should be attached to the newly created pod
        desired_status (str): The status of the pod to wait for
        wait (bool): True for waiting for the pod to reach the desired
            status, False otherwise
        namespace (str): The namespace for the new resource creation
        node_name (str): The name of specific node to schedule the pod
        pod_dict_path (str): YAML path for the pod

    Returns:
        Pod: A Pod instance

    Raises:
        AssertionError: In case of any failure
    """
    if interface_type == constants.CEPHBLOCKPOOL:
        pod_dict = pod_dict_path if pod_dict_path else constants.CSI_RBD_POD_YAML
        interface = constants.RBD_INTERFACE
    else:
        pod_dict = pod_dict_path if pod_dict_path else constants.CSI_CEPHFS_POD_YAML
        interface = constants.CEPHFS_INTERFACE

    pod_data = templating.load_yaml_to_dict(pod_dict)
    pod_data['metadata']['name'] = create_unique_resource_name(
        f'test-{interface}', 'pod')
    pod_data['metadata']['namespace'] = namespace
    if pvc_name:
        pod_data['spec']['volumes'][0]['persistentVolumeClaim'][
            'claimName'] = pvc_name

    if node_name:
        pod_data['spec']['nodeName'] = node_name
    else:
        if 'nodeName' in pod_data.get('spec'):
            del pod_data['spec']['nodeName']

    pod_obj = pod.Pod(**pod_data)
    pod_name = pod_data.get('metadata').get('name')
    created_resource = pod_obj.create(do_reload=wait)
    assert created_resource, (f"Failed to create resource {pod_name}")
    if wait:
        assert wait_for_resource_state(resource=pod_obj,
                                       state=desired_status,
                                       timeout=120)

    return pod_obj
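
A minimal call sketch for this variant (the PVC name is hypothetical):

rbd_pod = create_pod(
    interface_type=constants.CEPHBLOCKPOOL,
    pvc_name='rbd-pvc-example',  # hypothetical PVC name
    desired_status=constants.STATUS_RUNNING,
    wait=True,
)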
    def test_delete_noobaa_resources(self, resource_to_delete):
        """
        Test Noobaa resources delete and check Noobaa health

        """
        pod_obj = pod.Pod(**pod.get_pods_having_label(
            label=self.labels_map[resource_to_delete],
            namespace=defaults.ROOK_CLUSTER_NAMESPACE)[0])

        pod_obj.delete(force=True)
        assert pod_obj.ocp.wait_for_resource(
            condition=constants.STATUS_RUNNING,
            selector=self.labels_map[resource_to_delete],
            resource_count=1,
            timeout=300)
        self.cl_obj.wait_for_noobaa_health_ok()
 def finalizer():
     scc_name = constants.NOOBAA_DB_SERVICE_ACCOUNT_NAME
     service_account = constants.NOOBAA_DB_SERVICE_ACCOUNT
     pod_obj = pod.Pod(**pod.get_pods_having_label(
         label=self.labels_map["noobaa_db"],
         namespace=defaults.ROOK_CLUSTER_NAMESPACE,
     )[0])
     pod_data_list = pod_obj.get()
     ocp_scc = ocp.OCP(kind=constants.SCC,
                       namespace=defaults.ROOK_CLUSTER_NAMESPACE)
     if helpers.validate_scc_policy(
             sa_name=scc_name,
             namespace=defaults.ROOK_CLUSTER_NAMESPACE,
             scc_name=constants.ANYUID,
     ):
         ocp_scc.patch(
             resource_name=constants.ANYUID,
             params='[{"op": "remove", "path": "/users/0", '
             f'"value":{service_account}}}]',
             format_type="json",
         )
     if not helpers.validate_scc_policy(
             sa_name=scc_name,
             namespace=defaults.ROOK_CLUSTER_NAMESPACE,
             scc_name=scc_name,
     ):
         ocp_scc.patch(
             resource_name=scc_name,
             params='[{"op": "add", "path": "/users/0", '
             f'"value":{service_account}}}]',
             format_type="json",
         )
     if (pod_data_list.get("metadata").get("annotations").get(
             "openshift.io/scc") == constants.ANYUID):
         pod_obj.delete(force=True)
         assert pod_obj.ocp.wait_for_resource(
             condition=constants.STATUS_RUNNING,
             selector=self.labels_map["noobaa_db"],
             resource_count=1,
             timeout=300,
         ), "Noobaa pod did not reach running state"
         pod_data_list = pod_obj.get()
         assert (pod_data_list.get("metadata").get("annotations").get(
             "openshift.io/scc") == scc_name), "Invalid scc"
Example #8
    def test_restart_noobaa_resources(self, resource_to_delete):
        """
        Test Noobaa resources restart and check Noobaa health

        """
        labels_map = {
            'noobaa_core': constants.NOOBAA_CORE_POD_LABEL,
            'noobaa_db': constants.NOOBAA_DB_LABEL
        }
        pod_obj = self.resource_obj = pod.Pod(**pod.get_pods_having_label(
            label=labels_map[resource_to_delete],
            namespace=defaults.ROOK_CLUSTER_NAMESPACE)[0])

        pod_obj.delete(force=True)
        assert pod_obj.ocp.wait_for_resource(
            condition=constants.STATUS_RUNNING,
            selector=labels_map[resource_to_delete],
            resource_count=1,
            timeout=300)
        self.cl_obj.wait_for_noobaa_health_ok()
Example #9
def get_registry_pod_obj():
    """
    Function to get registry pod obj

    Returns:
        pod_obj (list): List of Registry pod objs

    Raises:
        UnexpectedBehaviour: When image-registry pod is not present.

    """
    # Sometimes, when there is an update to the config CRD, there will be two
    # registry pods: the old pod is terminated and a new pod comes up based on
    # the new CRD. The wait below therefore ensures that only the expected
    # number of registry pods (per the deployment's replica count) is running.

    registry_deployment = ocp.OCP(
        kind="deployment",
        namespace=constants.OPENSHIFT_IMAGE_REGISTRY_NAMESPACE,
        resource_name=constants.OPENSHIFT_IMAGE_REGISTRY_DEPLOYMENT,
    )
    replicas = registry_deployment.data["spec"].get("replicas", 1)
    registry_pods = ocp.OCP(
        kind="pod",
        namespace=constants.OPENSHIFT_IMAGE_REGISTRY_NAMESPACE,
        selector=constants.OPENSHIFT_IMAGE_SELECTOR,
    )
    registry_pods.wait_for_resource(
        condition=constants.STATUS_RUNNING,
        timeout=400,
        resource_count=replicas,
        dont_allow_other_resources=True,
    )
    pod_objs = [pod.Pod(**data) for data in registry_pods.data["items"]]
    pod_objs_len = len(pod_objs)
    if pod_objs_len == 0:
        raise UnexpectedBehaviour("No image-registry pod is present!")
    elif pod_objs_len != replicas:
        raise UnexpectedBehaviour(
            f"Expected {replicas} image-registry pod(s), but {pod_objs_len} "
            f"found!")
    return pod_objs
Example #10
    def test_mcg_namespace_disruptions_crd(
        self,
        mcg_obj,
        cld_mgr,
        awscli_pod,
        bucketclass_dict,
        bucket_factory,
        node_drain_teardown,
    ):
        """
        Test MCG namespace disruption flow

        1. Create NS resources with CRDs
        2. Create NS bucket with CRDs
        3. Upload to NS bucket
        4. Delete noobaa related pods and verify integrity of objects
        5. Create public access policy on NS bucket and verify Get op
        6. Drain nodes containing noobaa pods and verify integrity of objects
        7. Perform put operation to validate public access denial
        8. Edit/verify and remove objects on NS bucket

        """
        data = "Sample string content to write to a S3 object"
        object_key = "ObjKey-" + str(uuid.uuid4().hex)
        awscli_node_name = awscli_pod.get()["spec"]["nodeName"]

        aws_s3_creds = {
            "access_key_id": cld_mgr.aws_client.access_key,
            "access_key": cld_mgr.aws_client.secret_key,
            "endpoint": constants.MCG_NS_AWS_ENDPOINT,
            "region": config.ENV_DATA["region"],
        }

        # S3 account details
        user_name = "nb-user" + str(uuid.uuid4().hex)
        email = user_name + "@mail.com"

        logger.info("Setting up test files for upload, to the bucket/resources")
        setup_base_objects(awscli_pod, MCG_NS_ORIGINAL_DIR, MCG_NS_RESULT_DIR, amount=3)

        # Create the namespace resource and verify health
        ns_buc = bucket_factory(
            amount=1,
            interface=bucketclass_dict["interface"],
            bucketclass=bucketclass_dict,
        )[0]
        ns_bucket = ns_buc.name

        aws_target_bucket = ns_buc.bucketclass.namespacestores[0].uls_name

        logger.info(f"Namespace bucket: {ns_bucket} created")

        logger.info(f"Uploading objects to ns bucket: {ns_bucket}")
        sync_object_directory(
            awscli_pod,
            src=MCG_NS_ORIGINAL_DIR,
            target=f"s3://{ns_bucket}",
            s3_obj=mcg_obj,
        )

        for pod_to_respin in self.labels_map:
            logger.info(f"Re-spinning mcg resource: {self.labels_map[pod_to_respin]}")
            pod_obj = pod.Pod(
                **pod.get_pods_having_label(
                    label=self.labels_map[pod_to_respin],
                    namespace=defaults.ROOK_CLUSTER_NAMESPACE,
                )[0]
            )

            pod_obj.delete(force=True)

            assert pod_obj.ocp.wait_for_resource(
                condition=constants.STATUS_RUNNING,
                selector=self.labels_map[pod_to_respin],
                resource_count=1,
                timeout=300,
            )

            logger.info(
                f"Downloading objects from ns bucket: {ns_bucket} "
                f"after re-spinning: {self.labels_map[pod_to_respin]}"
            )
            sync_object_directory(
                awscli_pod,
                src=f"s3://{ns_bucket}",
                target=MCG_NS_RESULT_DIR,
                s3_obj=mcg_obj,
            )

            logger.info(
                f"Verifying integrity of objects "
                f"after re-spinning: {self.labels_map[pod_to_respin]}"
            )
            compare_directory(
                awscli_pod, MCG_NS_ORIGINAL_DIR, MCG_NS_RESULT_DIR, amount=3
            )

        # S3 account
        user = NoobaaAccount(mcg_obj, name=user_name, email=email, buckets=[ns_bucket])
        logger.info(f"Noobaa account: {user.email_id} with S3 access created")

        # Admin sets Public access policy(*)
        bucket_policy_generated = gen_bucket_policy(
            user_list=["*"],
            actions_list=["GetObject"],
            resources_list=[f'{ns_bucket}/{"*"}'],
        )
        bucket_policy = json.dumps(bucket_policy_generated)

        logger.info(
            f"Creating bucket policy on bucket: {ns_bucket} with wildcard (*) Principal"
        )
        put_policy = put_bucket_policy(mcg_obj, ns_bucket, bucket_policy)
        logger.info(f"Put bucket policy response from Admin: {put_policy}")

        logger.info(f"Getting bucket policy on bucket: {ns_bucket}")
        get_policy = get_bucket_policy(mcg_obj, ns_bucket)
        logger.info(f"Got bucket policy: {get_policy['Policy']}")

        # MCG admin writes an object to bucket
        logger.info(f"Writing object on bucket: {ns_bucket} by admin")
        assert s3_put_object(mcg_obj, ns_bucket, object_key, data), "Failed: PutObject"

        # Verifying whether Get operation is allowed to any S3 user
        logger.info(
            f"Get object action on namespace bucket: {ns_bucket} "
            f"with user: {user.email_id}"
        )
        assert s3_get_object(user, ns_bucket, object_key), "Failed: GetObject"

        # Upload files to NS target
        logger.info(
            f"Uploading objects directly to ns resource target: {aws_target_bucket}"
        )
        sync_object_directory(
            awscli_pod,
            src=MCG_NS_ORIGINAL_DIR,
            target=f"s3://{aws_target_bucket}",
            signed_request_creds=aws_s3_creds,
        )

        for pod_to_drain in self.labels_map:
            pod_obj = pod.Pod(
                **pod.get_pods_having_label(
                    label=self.labels_map[pod_to_drain],
                    namespace=defaults.ROOK_CLUSTER_NAMESPACE,
                )[0]
            )

            # Retrieve the node name on which the pod resides
            node_name = pod_obj.get()["spec"]["nodeName"]

            if awscli_node_name == node_name:
                logger.info(
                    f"Skipping node drain since aws cli pod node: "
                    f"{awscli_node_name} is same as {pod_to_drain} "
                    f"pod node: {node_name}"
                )
                continue

            # Drain the node
            drain_nodes([node_name])
            wait_for_nodes_status(
                [node_name], status=constants.NODE_READY_SCHEDULING_DISABLED
            )
            schedule_nodes([node_name])
            wait_for_nodes_status(timeout=300)

            # Retrieve the new pod
            pod_obj = pod.Pod(
                **pod.get_pods_having_label(
                    label=self.labels_map[pod_to_drain],
                    namespace=defaults.ROOK_CLUSTER_NAMESPACE,
                )[0]
            )
            wait_for_resource_state(pod_obj, constants.STATUS_RUNNING, timeout=120)

            # Verify all storage pods are running
            wait_for_storage_pods()

            logger.info(
                f"Downloading objects from ns bucket: {ns_bucket} "
                f"after draining node: {node_name} with pod {pod_to_drain}"
            )
            sync_object_directory(
                awscli_pod,
                src=f"s3://{ns_bucket}",
                target=MCG_NS_RESULT_DIR,
                s3_obj=mcg_obj,
            )

            logger.info(
                f"Verifying integrity of objects "
                f"after draining node with pod: {pod_to_drain}"
            )
            compare_directory(
                awscli_pod, MCG_NS_ORIGINAL_DIR, MCG_NS_RESULT_DIR, amount=3
            )

        logger.info(f"Editing the namespace resource bucket: {ns_bucket}")
        namespace_bucket_update(
            mcg_obj,
            bucket_name=ns_bucket,
            read_resource=[aws_target_bucket],
            write_resource=aws_target_bucket,
        )

        logger.info(f"Verifying object download after edit on ns bucket: {ns_bucket}")
        sync_object_directory(
            awscli_pod,
            src=f"s3://{ns_bucket}",
            target=MCG_NS_RESULT_DIR,
            s3_obj=mcg_obj,
        )

        # Verifying whether Put object action is denied
        logger.info(
            f"Verifying whether user: {user.email_id} has only public read access"
        )

        logger.info(f"Removing objects from ns bucket: {ns_bucket}")
        rm_object_recursive(awscli_pod, target=ns_bucket, mcg_obj=mcg_obj)
Example #11
def create_pod(interface_type=None,
               pvc_name=None,
               do_reload=True,
               namespace=defaults.ROOK_CLUSTER_NAMESPACE,
               node_name=None,
               pod_dict_path=None,
               sa_name=None,
               dc_deployment=False,
               raw_block_pv=False,
               raw_block_device=constants.RAW_BLOCK_DEVICE,
               replica_count=1,
               pod_name=None):
    """
    Create a pod

    Args:
        interface_type (str): The interface type (CephFS, RBD, etc.)
        pvc_name (str): The PVC that should be attached to the newly created pod
        do_reload (bool): True for reloading the object after creation, False otherwise
        namespace (str): The namespace for the new resource creation
        node_name (str): The name of specific node to schedule the pod
        pod_dict_path (str): YAML path for the pod
        sa_name (str): Serviceaccount name
        dc_deployment (bool): True if creating pod as deploymentconfig
        raw_block_pv (bool): True for creating raw block pv based pod, False otherwise
        raw_block_device (str): raw block device for the pod
        replica_count (int): Replica count for deployment config
        pod_name (str): Name of the pod to create

    Returns:
        Pod: A Pod instance

    Raises:
        AssertionError: In case of any failure
    """
    if interface_type == constants.CEPHBLOCKPOOL:
        pod_dict = pod_dict_path if pod_dict_path else constants.CSI_RBD_POD_YAML
        interface = constants.RBD_INTERFACE
    else:
        pod_dict = pod_dict_path if pod_dict_path else constants.CSI_CEPHFS_POD_YAML
        interface = constants.CEPHFS_INTERFACE
    if dc_deployment:
        pod_dict = pod_dict_path if pod_dict_path else constants.FEDORA_DC_YAML
    pod_data = templating.load_yaml(pod_dict)
    if not pod_name:
        pod_name = create_unique_resource_name(f'test-{interface}', 'pod')
    pod_data['metadata']['name'] = pod_name
    pod_data['metadata']['namespace'] = namespace
    if dc_deployment:
        pod_data['metadata']['labels']['app'] = pod_name
        pod_data['spec']['template']['metadata']['labels']['name'] = pod_name
        pod_data['spec']['replicas'] = replica_count

    if pvc_name:
        if dc_deployment:
            pod_data['spec']['template']['spec']['volumes'][0][
                'persistentVolumeClaim']['claimName'] = pvc_name
        else:
            pod_data['spec']['volumes'][0]['persistentVolumeClaim'][
                'claimName'] = pvc_name

    if interface_type == constants.CEPHBLOCKPOOL and raw_block_pv:
        pod_data['spec']['containers'][0]['volumeDevices'][0][
            'devicePath'] = raw_block_device
        pod_data['spec']['containers'][0]['volumeDevices'][0][
            'name'] = pod_data.get('spec').get('volumes')[0].get('name')

    if node_name:
        pod_data['spec']['nodeName'] = node_name
    else:
        if 'nodeName' in pod_data.get('spec'):
            del pod_data['spec']['nodeName']
    if sa_name and dc_deployment:
        pod_data['spec']['template']['spec']['serviceAccountName'] = sa_name
    if dc_deployment:
        ocs_obj = create_resource(**pod_data)
        logger.info(ocs_obj.name)
        assert (ocp.OCP(kind='pod', namespace=namespace)).wait_for_resource(
            condition=constants.STATUS_COMPLETED,
            resource_name=pod_name + '-1-deploy',
            resource_count=0,
            timeout=180,
            sleep=3)
        dpod_list = pod.get_all_pods(namespace=namespace)
        for dpod in dpod_list:
            if '-1-deploy' not in dpod.name:
                if pod_name in dpod.name:
                    return dpod
    else:
        pod_obj = pod.Pod(**pod_data)
        pod_name = pod_data.get('metadata').get('name')
        logger.info(f'Creating new Pod {pod_name} for test')
        created_resource = pod_obj.create(do_reload=do_reload)
        assert created_resource, (f"Failed to create Pod {pod_name}")

        return pod_obj
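
A minimal call sketch for the DeploymentConfig path of this variant (the PVC and ServiceAccount names are hypothetical):

dc_pod = create_pod(
    interface_type=constants.CEPHBLOCKPOOL,
    pvc_name='rbd-pvc-example',  # hypothetical PVC name
    sa_name='example-sa',  # hypothetical ServiceAccount name
    dc_deployment=True,
    replica_count=1,
)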
Example #12
    def test_quay_with_failures(self, quay_operator):
        """
        Test quay operations with Noobaa core failure

        1. Creates quay operator and registry on ODF.
        2. Initializes the Quay super user to access the APIs.
        3. Gets the super user token.
        4. Creates a new repo
        5. Creates a new org
        6. Pushes the image to the new repo
        7. Pulls the image locally from the quay repo
        8. Re-spins noobaa core
        9. Pulls the image again
        10. Deletes the repo
        """
        # Deploy quay operator
        quay_operator.setup_quay_operator()

        # Create quay registry
        quay_operator.create_quay_registry()
        log.info("Waiting for quay endpoint to start serving")
        sleep(90)
        endpoint = quay_operator.get_quay_endpoint()

        log.info("Pulling test image")
        exec_cmd(f"podman pull {constants.COSBENCH_IMAGE}")

        log.info("Getting the Super user token")
        token = get_super_user_token(endpoint)

        log.info("Validating super_user using token")
        check_super_user(endpoint, token)

        podman_url = endpoint.replace("https://", "")
        log.info(f"Logging into quay endpoint: {podman_url}")
        quay_super_user_login(podman_url)

        repo_name = "test_repo"
        test_image = f"{constants.QUAY_SUPERUSER}/{repo_name}:latest"
        create_quay_repository(
            endpoint, token, org_name=constants.QUAY_SUPERUSER, repo_name=repo_name
        )
        org_name = "test_org"
        log.info(f"Creating a new organization name: {org_name}")
        create_quay_org(endpoint, token, org_name)

        log.info("Tagging a test image")
        exec_cmd(f"podman tag {constants.COSBENCH_IMAGE} {podman_url}/{test_image}")
        log.info(f"Pushing the test image to quay repo: {repo_name}")
        exec_cmd(f"podman push {podman_url}/{test_image} --tls-verify=false")

        log.info(f"Validating whether the image can be pull from quay: {repo_name}")
        exec_cmd(f"podman pull {podman_url}/{test_image} --tls-verify=false")

        # TODO: Trigger build
        pod_obj = pod.Pod(
            **pod.get_pods_having_label(
                label=constants.NOOBAA_CORE_POD_LABEL,
                namespace=defaults.ROOK_CLUSTER_NAMESPACE,
            )[0]
        )
        pod_obj.delete(force=True)
        assert pod_obj.ocp.wait_for_resource(
            condition=constants.STATUS_RUNNING,
            selector=constants.NOOBAA_CORE_POD_LABEL,
            resource_count=1,
            timeout=800,
            sleep=60,
        )
        log.info("Pulling the image again from quay, post noobaa core failure")
        exec_cmd(f"podman pull {podman_url}/{test_image} --tls-verify=false")

        log.info(f"Deleting the repository: {repo_name}")
        delete_quay_repository(
            endpoint, token, org_name=constants.QUAY_SUPERUSER, repo_name=repo_name
        )