Example No. 1
def measure_pvc_creation_time(interface, pvc_name):
    """
    Measure PVC creation time based on logs

    Args:
        interface (str): The interface backing the PVC
        pvc_name (str): Name of the PVC for creation time measurement

    Returns:
        float: Creation time for the PVC

    """
    format = '%H:%M:%S.%f'
    # Get the correct provisioner pod based on the interface
    if interface == constants.CEPHBLOCKPOOL:
        pod_name = pod.get_rbd_provisioner_pod().name
    else:
        pod_name = pod.get_cephfs_provisioner_pod().name

    # get the logs from the csi-provisioner container
    logs = pod.get_pod_logs(pod_name, 'csi-provisioner')
    logs = logs.split("\n")
    # Extract the starting time for the PVC provisioning
    start = [
        i for i in logs if re.search(f"provision.*{pvc_name}.*started", i)
    ][0].split(' ')[1]
    # Extract the end time for the PVC provisioning
    end = [
        i for i in logs if re.search(f"provision.*{pvc_name}.*succeeded", i)
    ][0].split(' ')[1]
    total = (datetime.datetime.strptime(end, format) -
             datetime.datetime.strptime(start, format))
    return total.total_seconds()
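A minimal usage sketch of the helper above (the PVC name is illustrative; constants and a module-level log logger are assumed to be available as in the other examples):

# Hypothetical call: measure how long the CSI provisioner took to create "my-test-pvc"
creation_seconds = measure_pvc_creation_time(constants.CEPHBLOCKPOOL, "my-test-pvc")
log.info(f"PVC creation took {creation_seconds} seconds")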
Example No. 2
def get_end_creation_time(interface, pvc_name):
    """
    Get the ending creation time of a PVC based on provisioner logs

    Args:
        interface (str): The interface backing the PVC
        pvc_name (str): Name of the PVC for creation time measurement

    Returns:
        datetime object: End time of PVC creation

    """
    format = '%H:%M:%S.%f'
    # Get the correct provisioner pod based on the interface
    if interface == constants.CEPHBLOCKPOOL:
        pod_name = pod.get_rbd_provisioner_pod().name
    else:
        pod_name = pod.get_cephfs_provisioner_pod().name

    # get the logs from the csi-provisioner container
    logs = pod.get_pod_logs(pod_name, 'csi-provisioner')
    logs = logs.split("\n")
    # Extract the end time for the PVC provisioning
    end = [
        i for i in logs if re.search(f"provision.*{pvc_name}.*succeeded", i)
    ]
    end = end[0].split(' ')[1]
    return datetime.datetime.strptime(end, format)
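Both helpers above pull the timestamp out of klog-style csi-provisioner lines; an illustrative sketch of that parsing with a made-up log line (the exact message format is an assumption):

import datetime

sample_line = 'I0211 13:45:02.123456       1 controller.go:1279] provision "ns/pvc-test" started'
timestamp = sample_line.split(' ')[1]  # second space-separated field -> '13:45:02.123456'
parsed = datetime.datetime.strptime(timestamp, '%H:%M:%S.%f')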
Example No. 3
 def test_noobaa_kms_validation(self):
     """
     Validate from the operator logs that NooBaa successfully uses the KMS integration.
     """
     operator_pod = pod.get_pods_having_label(
         label=constants.NOOBAA_OPERATOR_POD_LABEL,
         namespace=defaults.ROOK_CLUSTER_NAMESPACE,
     )[0]
     operator_logs = pod.get_pod_logs(
         pod_name=operator_pod["metadata"]["name"])
     assert "found root secret in external KMS successfully" in operator_logs
Example No. 4
 def test_deployer_logs_not_empty(self):
     """
     Test that the logs of the manager container of the ocs-osd-controller-manager pod are not empty
     """
     deployer_pod = pod.get_pods_having_label(
         constants.MANAGED_CONTROLLER_LABEL,
         constants.OPENSHIFT_STORAGE_NAMESPACE)[0]
     deployer_logs = pod.get_pod_logs(
         pod_name=deployer_pod["metadata"]["name"], container="manager")
     log_lines = deployer_logs.split("\n")
     for line in log_lines:
         if "ERR" in line:
             log.info(f"{line}")
     log.info(f"Deployer log has {len(log_lines)} lines.")
     assert len(log_lines) > 100
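A hedged variation on the error scan above: collect the error lines instead of only logging them, so a test can assert on them directly (the helper name is illustrative and not part of ocs-ci; "ERR" is the marker used in the test above):

def get_error_lines(logs, marker="ERR"):
    """Return the log lines that contain the given error marker."""
    return [line for line in logs.split("\n") if marker in line]

# e.g. with deployer_logs fetched as in test_deployer_logs_not_empty:
# assert not get_error_lines(deployer_logs), "Deployer log contains error lines"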
Example No. 5
def wait_for_active_pods(job, desired_count, timeout=3):
    """
    Wait for the job to reach the desired number of active pods within the
    time specified by timeout.

    Args:
        job (obj): OCS job object
        desired_count (int): Number of desired active pods for the provided job
        timeout (int): Number of seconds to wait for the job to reach the desired state

    Returns:
        bool: True if the job reached the desired number of active pods, False otherwise

    """
    job_name = job.name
    log.info(f"Checking number of active pods for job {job_name}")

    def _retrieve_job_state():
        job_obj = job.ocp.get(resource_name=job_name, out_yaml_format=True)
        return job_obj["status"]["active"]

    try:
        for state in TimeoutSampler(timeout=timeout,
                                    sleep=3,
                                    func=_retrieve_job_state):
            if state == desired_count:
                return True
            else:
                log.debug(f"Number of active pods for job {job_name}: {state}")
    except TimeoutExpiredError:
        log.error(
            f"Job {job_name} doesn't have correct number of active pods ({desired_count})"
        )
        job_pods = pod.get_pods_having_label(f"job-name={job_name}",
                                             job.namespace)
        for job_pod in job_pods:
            log.info(
                f"Description of job pod {job_pod['metadata']['name']}: {job_pod}"
            )
            pod_logs = pod.get_pod_logs(
                job_pod["metadata"]["name"],
                namespace=job_pod["metadata"]["namespace"])
            log.info(f"Logs from job pod {job_pod['metadata']['name']} are "
                     f"available on DEBUG level")
            log.debug(
                f"Logs from job pod {job_pod['metadata']['name']}: {pod_logs}")

        return False
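A hypothetical call, assuming job is an OCS job object created elsewhere in the test and log is the module logger:

if not wait_for_active_pods(job, desired_count=3, timeout=60):
    log.error("Job did not reach the desired number of active pods in time")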
Example No. 6
def test_verify_noobaa_db_service(mcg_obj_session):
    """
    Validates that the MCG CLI status and the noobaa-db pod logs contain no errors about a non-existent 'noobaa-db' service
    """
    # Get noobaa status
    status = mcg_obj_session.exec_mcg_cmd("status").stderr
    assert (
        'Service "noobaa-db"' not in status
    ), "Error in MCG Cli status regarding non-existent noobaa-db service"
    log.info(
        "Verified: noobaa status does not contain error related to `noobaa-db` service."
    )

    # verify noobaa db logs
    pattern = "Not found: Service noobaa-db"
    noobaa_db_log = get_pod_logs(pod_name=constants.NB_DB_NAME_47_AND_ABOVE)
    assert (re.search(pattern=pattern, string=noobaa_db_log) is
            None), f"Error: {pattern} msg found in the noobaa db logs."
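The "pattern must be absent from the pod logs" check recurs in several of these examples; a small hedged helper sketch (name and signature are illustrative, not part of ocs-ci):

import re

def assert_pattern_absent(pod_name, pattern, **log_kwargs):
    """Fail if the pattern appears anywhere in the given pod's logs."""
    logs = get_pod_logs(pod_name=pod_name, **log_kwargs)
    assert re.search(pattern, logs) is None, (
        f"'{pattern}' found in the logs of pod {pod_name}")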
Example No. 7
def test_pod_log_after_upgrade():
    """
    Check OSD/MON/MGR pod logs after upgrade and verify the expected log exist

    """
    pod_objs = get_osd_pods() + get_mon_pods() + get_mgr_pods()
    pod_names = [osd_pod_obj.name for osd_pod_obj in pod_objs]
    expected_log_after_upgrade = "set uid:gid to 167:167 (ceph:ceph)"
    logging.info(f"Check that the log '{expected_log_after_upgrade}' "
                 f"appears after the osd/mon/mg pod is initialized")
    for pod_name in pod_names:
        pod_logs = get_pod_logs(pod_name=pod_name, all_containers=True)
        assert expected_log_after_upgrade in pod_logs, (
            f"The expected log after upgrade '{expected_log_after_upgrade}' does not exist"
            f" on pod {pod_name}")
    logging.info(
        f"The log '{expected_log_after_upgrade}' appears in all relevant pods."
    )
Example No. 8
 def test_provider_server_logs(self):
     """
     Test that the logs of ocs-provider-server pod have entries for each consumer
     """
     provider_pod = pod.get_pods_having_label(
         constants.PROVIDER_SERVER_LABEL,
         constants.OPENSHIFT_STORAGE_NAMESPACE)[0]
     provider_logs = pod.get_pod_logs(
         pod_name=provider_pod["metadata"]["name"])
     log_lines = provider_logs.split("\n")
     consumer_names = managedservice.get_consumer_names()
     for consumer_name in consumer_names:
         expected_log = (
             f'successfully Enabled the StorageConsumer resource "{consumer_name}"'
         )
         log_found = False
         for line in log_lines:
             if expected_log in line:
                 log_found = True
                 log.info(
                     f"'{expected_log}' found in ocs-provider-server logs")
                 break
         assert log_found, f"'{expected_log}' not found in ocs-provider-server logs"
Example No. 9
    def wait_for_workload(self, workload_id, sleep=1, timeout=60):
        """
        Waits for the cosbench workload to complete

        Args:
            workload_id (str): ID of cosbench workload
            sleep (int): Polling interval in seconds between log checks
            timeout (int): Timeout in seconds to wait for the workload to complete

        Returns:
            bool: Whether cosbench workload processed successfully

        """
        logger.info(f"Waiting for workload {workload_id} to be processed")
        pattern = f"sucessfully processed workload {workload_id}"
        try:
            for ret in TimeoutSampler(
                    timeout=timeout,
                    sleep=sleep,
                    func=get_pod_logs,
                    pod_name=self.cosbench_pod.name,
                    namespace=self.namespace,
            ):
                if re.search(pattern=pattern, string=ret):
                    break
            logger.info(
                f"Verified: Workload {workload_id} processed successfully")
            return True
        except TimeoutExpiredError:
            logger.error(
                f"Workload {workload_id} did not complete. Dumping cosbench pod log"
            )
            # Log cosbench pod for debugging purpose
            cosbench_log = get_pod_logs(pod_name=self.cosbench_pod.name,
                                        namespace=self.namespace)
            logger.debug(cosbench_log)
            return False
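A hypothetical usage, assuming cosbench is an instance of the class defining wait_for_workload and "w1" is a previously submitted workload ID:

assert cosbench.wait_for_workload("w1", sleep=5, timeout=300), (
    "Cosbench workload w1 did not finish in time")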
Example No. 10
    def test_recovery_from_volume_deletion(self, nodes, pvc_factory,
                                           pod_factory):
        """
        Test cluster recovery from disk deletion from the platform side.
        Based on documented procedure detailed in
        https://bugzilla.redhat.com/show_bug.cgi?id=1823183

        """
        logger.info("Picking a PV which to be deleted from the platform side")
        osd_pvs = get_deviceset_pvs()
        osd_pv = random.choice(osd_pvs)
        osd_pv_name = osd_pv.name
        # get the claim name
        logger.info(f"Getting the claim name for OSD PV {osd_pv_name}")
        claim_name = osd_pv.get().get("spec").get("claimRef").get("name")

        # Get the backing volume name
        logger.info(f"Getting the backing volume name for PV {osd_pv_name}")
        backing_volume = nodes.get_data_volumes(pvs=[osd_pv])[0]

        # Get the corresponding PVC
        logger.info(f"Getting the corresponding PVC of PV {osd_pv_name}")
        osd_pvcs = get_deviceset_pvcs()
        osd_pvcs_count = len(osd_pvcs)
        osd_pvc = [
            ds for ds in osd_pvcs
            if ds.get().get("metadata").get("name") == claim_name
        ][0]

        # Get the corresponding OSD pod and ID
        logger.info(f"Getting the OSD pod using PVC {osd_pvc.name}")
        osd_pods = get_osd_pods()
        osd_pods_count = len(osd_pods)
        osd_pod = [
            osd_pod for osd_pod in osd_pods
            if osd_pod.get().get("metadata").get("labels").get(
                constants.CEPH_ROOK_IO_PVC_LABEL) == claim_name
        ][0]
        logger.info(f"OSD_POD {osd_pod.name}")
        osd_id = osd_pod.get().get("metadata").get("labels").get("ceph-osd-id")

        # Get the node that has the OSD pod running on
        logger.info(
            f"Getting the node that has the OSD pod {osd_pod.name} running on")
        osd_node = get_pod_node(osd_pod)
        osd_prepare_pods = get_osd_prepare_pods()
        osd_prepare_pod = [
            pod for pod in osd_prepare_pods if pod.get().get("metadata").get(
                "labels").get(constants.CEPH_ROOK_IO_PVC_LABEL) == claim_name
        ][0]
        osd_prepare_job_name = (osd_prepare_pod.get().get("metadata").get(
            "labels").get("job-name"))
        osd_prepare_job = get_job_obj(osd_prepare_job_name)

        # Get the corresponding OSD deployment
        logger.info(f"Getting the OSD deployment for OSD PVC {claim_name}")
        osd_deployment = [
            osd_pod for osd_pod in get_osd_deployments()
            if osd_pod.get().get("metadata").get("labels").get(
                constants.CEPH_ROOK_IO_PVC_LABEL) == claim_name
        ][0]
        osd_deployment_name = osd_deployment.name

        # Delete the volume from the platform side
        logger.info(f"Deleting {backing_volume} from the platform side")
        nodes.detach_volume(backing_volume, osd_node)

        # Scale down OSD deployment
        logger.info(f"Scaling down OSD deployment {osd_deployment_name} to 0")
        ocp.OCP().exec_oc_cmd(
            f"scale --replicas=0 deployment/{osd_deployment_name}")

        # Force delete OSD pod if necessary
        osd_pod_name = osd_pod.name
        logger.info(f"Waiting for OSD pod {osd_pod.name} to get deleted")
        try:
            osd_pod.ocp.wait_for_delete(resource_name=osd_pod_name)
        except TimeoutError:
            osd_pod.delete(force=True)
            osd_pod.ocp.wait_for_delete(resource_name=osd_pod_name)

        # Run ocs-osd-removal job
        ocp_version = float(get_ocp_version())
        if ocp_version >= 4.6:
            cmd = f"process ocs-osd-removal -p FAILED_OSD_IDS={osd_id} -o yaml"
        else:
            cmd = f"process ocs-osd-removal -p FAILED_OSD_ID={osd_id} -o yaml"

        logger.info(f"Executing OSD removal job on OSD-{osd_id}")
        ocp_obj = ocp.OCP(namespace=config.ENV_DATA["cluster_namespace"])
        osd_removal_job_yaml = ocp_obj.exec_oc_cmd(cmd)
        osd_removal_job = OCS(**osd_removal_job_yaml)
        osd_removal_job.create(do_reload=False)

        # Get ocs-osd-removal pod name
        logger.info("Getting the ocs-osd-removal pod name")
        osd_removal_pod_name = get_osd_removal_pod_name(osd_id)
        osd_removal_pod_obj = get_pod_obj(osd_removal_pod_name,
                                          namespace="openshift-storage")
        osd_removal_pod_obj.ocp.wait_for_resource(
            condition=constants.STATUS_COMPLETED,
            resource_name=osd_removal_pod_name)

        # Verify OSD removal from the ocs-osd-removal pod logs
        logger.info(
            f"Verifying removal of OSD from {osd_removal_pod_name} pod logs")
        logs = get_pod_logs(osd_removal_pod_name)
        pattern = f"purged osd.{osd_id}"
        assert re.search(pattern, logs)

        osd_pvc_name = osd_pvc.name

        if ocp_version < 4.6:
            # Delete the OSD prepare job
            logger.info(f"Deleting OSD prepare job {osd_prepare_job_name}")
            osd_prepare_job.delete()
            osd_prepare_job.ocp.wait_for_delete(
                resource_name=osd_prepare_job_name, timeout=120)

            # Delete the OSD PVC
            logger.info(f"Deleting OSD PVC {osd_pvc_name}")
            osd_pvc.delete()
            osd_pvc.ocp.wait_for_delete(resource_name=osd_pvc_name)

            # Delete the OSD deployment
            logger.info(f"Deleting OSD deployment {osd_deployment_name}")
            osd_deployment.delete()
            osd_deployment.ocp.wait_for_delete(
                resource_name=osd_deployment_name, timeout=120)
        else:
            # If ocp version is '4.6' and above the osd removal job should
            # delete the OSD prepare job, OSD PVC, OSD deployment
            logger.info(
                f"Verifying deletion of OSD prepare job {osd_prepare_job_name}"
            )
            osd_prepare_job.ocp.wait_for_delete(
                resource_name=osd_prepare_job_name, timeout=30)
            logger.info(f"Verifying deletion of OSD PVC {osd_pvc_name}")
            osd_pvc.ocp.wait_for_delete(resource_name=osd_pvc_name, timeout=30)
            logger.info(
                f"Verifying deletion of OSD deployment {osd_deployment_name}")
            osd_deployment.ocp.wait_for_delete(
                resource_name=osd_deployment_name, timeout=30)

        # Delete PV
        logger.info(f"Verifying deletion of PV {osd_pv_name}")
        try:
            osd_pv.ocp.wait_for_delete(resource_name=osd_pv_name)
        except TimeoutError:
            osd_pv.delete()
            osd_pv.ocp.wait_for_delete(resource_name=osd_pv_name)

        if ocp_version < 4.6:
            # Delete the rook ceph operator pod to trigger reconciliation
            rook_operator_pod = get_operator_pods()[0]
            logger.info(
                f"deleting Rook Ceph operator pod {rook_operator_pod.name}")
            rook_operator_pod.delete()

        # Delete the OSD removal job
        logger.info(f"Deleting OSD removal job ocs-osd-removal-{osd_id}")
        osd_removal_job = get_job_obj(f"ocs-osd-removal-{osd_id}")
        osd_removal_job.delete()
        osd_removal_job.ocp.wait_for_delete(
            resource_name=f"ocs-osd-removal-{osd_id}")

        timeout = 600
        # Wait for OSD PVC to get created and reach Bound state
        logger.info(
            "Waiting for a new OSD PVC to get created and reach Bound state")
        assert osd_pvc.ocp.wait_for_resource(
            timeout=timeout,
            condition=constants.STATUS_BOUND,
            selector=constants.OSD_PVC_GENERIC_LABEL,
            resource_count=osd_pvcs_count,
        ), (f"Cluster recovery failed after {timeout} seconds. "
            f"Expected to have {osd_pvcs_count} OSD PVCs in status Bound. Current OSD PVCs status: "
            f"{[pvc.ocp.get_resource(pvc.get().get('metadata').get('name'), 'STATUS') for pvc in get_deviceset_pvcs()]}"
            )
        # Wait for OSD pod to get created and reach Running state
        logger.info(
            "Waiting for a new OSD pod to get created and reach Running state")
        assert osd_pod.ocp.wait_for_resource(
            timeout=timeout,
            condition=constants.STATUS_RUNNING,
            selector=constants.OSD_APP_LABEL,
            resource_count=osd_pods_count,
        ), (f"Cluster recovery failed after {timeout} seconds. "
            f"Expected to have {osd_pods_count} OSD pods in status Running. Current OSD pods status: "
            f"{[osd_pod.ocp.get_resource(pod.get().get('metadata').get('name'), 'STATUS') for pod in get_osd_pods()]}"
            )

        # We need to silence the old osd crash warning due to BZ https://bugzilla.redhat.com/show_bug.cgi?id=1896810
        # This is a workaround - issue for tracking: https://github.com/red-hat-storage/ocs-ci/issues/3438
        if ocp_version >= 4.6:
            silence_osd_crash = cluster.wait_for_silence_ceph_osd_crash_warning(
                osd_pod_name)
            if not silence_osd_crash:
                logger.info("Didn't find ceph osd crash warning")

        # Validate cluster is still functional
        self.sanity_helpers.health_check(tries=100)
        self.sanity_helpers.create_resources(pvc_factory, pod_factory)
Example No. 11
    def test_rgw_kafka_notifications(self, bucket_factory):
        """
        Test to verify rgw kafka notifications

        """
        # Get sc
        sc = default_storage_class(interface_type=constants.CEPHBLOCKPOOL)

        # Deploy amq cluster
        self.amq.setup_amq_cluster(sc.name)

        # Create topic
        self.kafka_topic = self.amq.create_kafka_topic()

        # Create Kafkadrop pod
        (
            self.kafkadrop_pod,
            self.kafkadrop_svc,
            self.kafkadrop_route,
        ) = self.amq.create_kafkadrop()

        # Get the kafkadrop route
        kafkadrop_host = self.kafkadrop_route.get().get("spec").get("host")

        # Create bucket
        bucketname = bucket_factory(amount=1, interface="RGW-OC")[0].name

        # Get RGW credentials
        rgw_obj = RGW()
        rgw_endpoint, access_key, secret_key = rgw_obj.get_credentials()

        # Clone notify repo
        notify_path = clone_notify()

        # Initialise to put objects
        data = "A random string data to write on created rgw bucket"
        obc_obj = OBC(bucketname)
        s3_resource = boto3.resource(
            "s3",
            verify=retrieve_verification_mode(),
            endpoint_url=rgw_endpoint,
            aws_access_key_id=obc_obj.access_key_id,
            aws_secret_access_key=obc_obj.access_key,
        )
        s3_client = s3_resource.meta.client

        # Initialize notify command to run
        notify_cmd = (
            f"python {notify_path} -e {rgw_endpoint} -a {obc_obj.access_key_id} "
            f"-s {obc_obj.access_key} -b {bucketname} -ke {constants.KAFKA_ENDPOINT} -t {self.kafka_topic.name}"
        )
        log.info(f"Running cmd {notify_cmd}")

        # Put objects to bucket
        assert s3_client.put_object(Bucket=bucketname, Key="key-1",
                                    Body=data), "Failed: Put object: key-1"
        exec_cmd(notify_cmd)

        # Validate rgw logs notification are sent
        # No errors are seen
        pattern = "ERROR: failed to create push endpoint"
        rgw_pod_obj = get_rgw_pods()
        rgw_log = get_pod_logs(pod_name=rgw_pod_obj[0].name, container="rgw")
        assert re.search(pattern=pattern, string=rgw_log) is None, (
            f"Error: {pattern} msg found in the rgw logs."
            f"Validate {pattern} found on rgw logs and also "
            f"rgw bucket notification is working correctly")
        assert s3_client.put_object(Bucket=bucketname, Key="key-2",
                                    Body=data), "Failed: Put object: key-2"
        exec_cmd(notify_cmd)

        # Validate message are received Kafka side using curl command
        # A temporary way to check from Kafka side, need to check from UI
        curl_command = (
            f"curl -X GET {kafkadrop_host}/topic/{self.kafka_topic.name} "
            "-H 'content-type: application/vnd.kafka.json.v2+json'")
        json_output = run_cmd(cmd=curl_command)
        new_string = json_output.split()
        messages = new_string[new_string.index("messages</td>") + 1]
        if messages.find("1") == -1:
            raise Exception(
                "Error: Messages are not recieved from Kafka side."
                "RGW bucket notification is not working as expected.")

        # Validate the timestamp events
        ocs_version = config.ENV_DATA["ocs_version"]
        if Version.coerce(ocs_version) >= Version.coerce("4.8"):
            cmd = (
                f"bin/kafka-console-consumer.sh --bootstrap-server {constants.KAFKA_ENDPOINT} "
                f"--topic {self.kafka_topic.name} --from-beginning --timeout-ms 20000"
            )
            pod_list = get_pod_name_by_pattern(
                pattern="my-cluster-zookeeper",
                namespace=constants.AMQ_NAMESPACE)
            zookeeper_obj = get_pod_obj(name=pod_list[0],
                                        namespace=constants.AMQ_NAMESPACE)
            event_obj = zookeeper_obj.exec_cmd_on_pod(command=cmd)
            log.info(f"Event obj: {event_obj}")
            event_time = event_obj.get("Records")[0].get("eventTime")
            format_string = "%Y-%m-%dT%H:%M:%S.%fZ"
            try:
                datetime.strptime(event_time, format_string)
            except ValueError as ef:
                log.error(
                    f"Timestamp event {event_time} doesnt match the pattern {format_string}"
                )
                raise ef

            log.info(
                f"Timestamp event {event_time} matches the pattern {format_string}"
            )
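The curl parsing above assumes the Kafdrop topic page embeds the message count in the cell right after a "messages</td>" token; an illustrative walk-through with made-up markup:

sample = "<td>Total messages</td> <td>1</td>"
tokens = sample.split()                                   # ['<td>Total', 'messages</td>', '<td>1</td>']
count_token = tokens[tokens.index("messages</td>") + 1]   # '<td>1</td>'
assert count_token.find("1") != -1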
Example No. 12
    def test_rgw_kafka_notifications(self, bucket_factory):
        """
        Test to verify rgw kafka notifications

        """
        # Get sc
        sc = default_storage_class(interface_type=constants.CEPHBLOCKPOOL)

        # Deploy amq cluster
        self.amq.setup_amq_cluster(sc.name)

        # Create topic
        self.kafka_topic = self.amq.create_kafka_topic()

        # Create Kafkadrop pod
        (
            self.kafkadrop_pod,
            self.kafkadrop_svc,
            self.kafkadrop_route,
        ) = self.amq.create_kafkadrop()

        # Get the kafkadrop route
        kafkadrop_host = self.kafkadrop_route.get().get("spec").get("host")

        # Create bucket
        bucketname = bucket_factory(amount=1, interface="RGW-OC")[0].name

        # Get RGW credentials
        rgw_obj = RGW()
        rgw_endpoint, access_key, secret_key = rgw_obj.get_credentials()

        # Clone notify repo
        notify_path = clone_notify()

        # Initialise to put objects
        data = "A random string data to write on created rgw bucket"
        obc_obj = OBC(bucketname)
        s3_resource = boto3.resource(
            "s3",
            verify=retrieve_verification_mode(),
            endpoint_url=rgw_endpoint,
            aws_access_key_id=obc_obj.access_key_id,
            aws_secret_access_key=obc_obj.access_key,
        )
        s3_client = s3_resource.meta.client

        # Initialize notify command to run
        notify_cmd = (
            f"python {notify_path} -e {rgw_endpoint} -a {obc_obj.access_key_id} "
            f"-s {obc_obj.access_key} -b {bucketname} -ke {constants.KAFKA_ENDPOINT} -t {self.kafka_topic.name}"
        )
        log.info(f"Running cmd {notify_cmd}")

        # Put objects to bucket
        assert s3_client.put_object(Bucket=bucketname, Key="key-1",
                                    Body=data), "Failed: Put object: key-1"
        exec_cmd(notify_cmd)

        # Validate rgw logs notification are sent
        # No errors are seen
        pattern = "ERROR: failed to create push endpoint"
        rgw_pod_obj = get_rgw_pods()
        rgw_log = get_pod_logs(pod_name=rgw_pod_obj[0].name, container="rgw")
        assert re.search(pattern=pattern, string=rgw_log) is None, (
            f"Error: {pattern} msg found in the rgw logs."
            f"Validate {pattern} found on rgw logs and also "
            f"rgw bucket notification is working correctly")
        assert s3_client.put_object(Bucket=bucketname, Key="key-2",
                                    Body=data), "Failed: Put object: key-2"
        exec_cmd(notify_cmd)

        # Validate message are received Kafka side using curl command
        # A temporary way to check from Kafka side, need to check from UI
        curl_command = (
            f"curl -X GET {kafkadrop_host}/topic/{self.kafka_topic.name} "
            "-H 'content-type: application/vnd.kafka.json.v2+json'")
        json_output = run_cmd(cmd=curl_command)
        new_string = json_output.split()
        messages = new_string[new_string.index("messages</td>") + 1]
        if messages.find("1") == -1:
            raise Exception(
                "Error: Messages are not recieved from Kafka side."
                "RGW bucket notification is not working as expected.")
Example No. 13
def measure_obc_deletion_time(obc_name_list, timeout=60):
    """
    Measure OBC deletion time

    Args:
        obc_name_list (list): List of OBC names to measure deletion time for
        timeout (int): Wait time in seconds before collecting the logs again

    Returns:
        obc_dict (dict): Dictionary of OBCs and their deletion time in seconds

    """
    # Get obc deletion logs
    nb_pod_name = get_pod_name_by_pattern("noobaa-operator-")
    nb_pod_log = pod.get_pod_logs(
        pod_name=nb_pod_name[0],
        namespace=constants.OPENSHIFT_STORAGE_NAMESPACE)
    nb_pod_log = nb_pod_log.split("\n")

    loop_cnt = 0
    while True:
        no_data = list()
        for obc_name in obc_name_list:
            start = [
                i for i in nb_pod_log
                if re.search(f"removing ObjectBucket.*{obc_name}", i)
            ]
            end = [
                i for i in nb_pod_log
                if re.search(f"ObjectBucket deleted.*{obc_name}", i)
            ]
            if not start or not end:
                no_data.append(obc_name)
        if no_data:
            time.sleep(timeout)
            nb_pod_name = get_pod_name_by_pattern("noobaa-operator-")
            nb_pod_log = pod.get_pod_logs(
                pod_name=nb_pod_name[0],
                namespace=constants.OPENSHIFT_STORAGE_NAMESPACE)
            nb_pod_log = nb_pod_log.split("\n")
            loop_cnt += 1
            if loop_cnt >= 10:
                log.info("Waited for more than 10 mins but still no data")
                raise UnexpectedBehaviour(
                    f"There is no obc deletion data in noobaa-operator logs for {no_data}"
                )
            continue
        else:
            break

    obc_dict = dict()
    this_year = str(datetime.datetime.now().year)
    for obc_name in obc_name_list:
        # Extract obc deletion start time
        start_item = [
            i for i in nb_pod_log
            if re.search(f"removing ObjectBucket.*{obc_name}", i)
        ]
        mon_day = " ".join(start_item[0].split(" ")[0:2])
        start = f"{this_year} {mon_day}"
        dt_start = datetime.datetime.strptime(start, "%Y I%m%d %H:%M:%S.%f")

        # Extract obc deletion end time
        end_item = [
            i for i in nb_pod_log
            if re.search(f"ObjectBucket deleted.*{obc_name}", i)
        ]
        mon_day = " ".join(end_item[0].split(" ")[0:2])
        end = f"{this_year} {mon_day}"
        dt_end = datetime.datetime.strptime(end, "%Y I%m%d %H:%M:%S.%f")
        total = dt_end - dt_start
        log.info(f"{obc_name}: {total.total_seconds()} sec")
        obc_dict[obc_name] = total.total_seconds()

    return obc_dict
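The timestamp arithmetic above assumes klog-style noobaa-operator lines whose first two space-separated fields carry the month/day and time; an illustrative sketch with a made-up line:

import datetime

sample_line = 'I0211 13:45:02.123456       1 obc_controller.go:123] removing ObjectBucket "obc-test"'
mon_day = " ".join(sample_line.split(" ")[0:2])  # 'I0211 13:45:02.123456'
stamp = datetime.datetime.strptime(f"2021 {mon_day}", "%Y I%m%d %H:%M:%S.%f")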