Example #1
def collect_noobaa_db_dump(log_dir_path):
    """
    Collect the Noobaa DB dump

    Args:
        log_dir_path (str): directory for dumped Noobaa DB

    """
    from ocs_ci.ocs.resources.pod import (
        get_pods_having_label,
        download_file_from_pod,
        Pod,
    )

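    # NooBaa uses MongoDB up to OCS 4.6 and PostgreSQL from OCS 4.7, so both
    # the DB pod label and the dump command below depend on the OCS version.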
    nb_db_label = (constants.NOOBAA_DB_LABEL_46_AND_UNDER
                   if float(ocsci_config.ENV_DATA["ocs_version"]) < 4.7 else
                   constants.NOOBAA_DB_LABEL_47_AND_ABOVE)
    nb_db_pod = Pod(**get_pods_having_label(
        label=nb_db_label, namespace=defaults.ROOK_CLUSTER_NAMESPACE)[0])
    ocs_log_dir_path = os.path.join(log_dir_path, "noobaa_db_dump")
    create_directory_path(ocs_log_dir_path)
    ocs_log_dir_path = os.path.join(ocs_log_dir_path, "nbcore.gz")
    if float(ocsci_config.ENV_DATA["ocs_version"]) < 4.7:
        cmd = "mongodump --archive=nbcore.gz --gzip --db=nbcore"
    else:
        cmd = 'bash -c "pg_dump nbcore | gzip > nbcore.gz"'

    nb_db_pod.exec_cmd_on_pod(cmd)
    download_file_from_pod(
        pod_name=nb_db_pod.name,
        remotepath="/opt/app-root/src/nbcore.gz",
        localpath=ocs_log_dir_path,
        namespace=defaults.ROOK_CLUSTER_NAMESPACE,
    )
Example #2
def collect_noobaa_db_dump(log_dir_path):
    """
    Collect the Noobaa DB dump

    Args:
        log_dir_path (str): directory for dumped Noobaa DB

    """
    from ocs_ci.ocs.resources.pod import (
        get_pods_having_label,
        download_file_from_pod,
        Pod,
    )

    nb_db_pod = Pod(
        **get_pods_having_label(
            label=constants.NOOBAA_DB_LABEL, namespace=defaults.ROOK_CLUSTER_NAMESPACE
        )[0]
    )
    ocs_log_dir_path = os.path.join(log_dir_path, "noobaa_db_dump")
    create_directory_path(ocs_log_dir_path)
    ocs_log_dir_path = os.path.join(ocs_log_dir_path, "nbcore.gz")
    nb_db_pod.exec_cmd_on_pod("mongodump --archive=nbcore.gz --gzip --db=nbcore")
    download_file_from_pod(
        pod_name=nb_db_pod.name,
        remotepath="/opt/app-root/src/nbcore.gz",
        localpath=ocs_log_dir_path,
        namespace=defaults.ROOK_CLUSTER_NAMESPACE,
    )
Example #3
def collect_noobaa_db_dump(log_dir_path):
    """
    Collect the Noobaa DB dump

    Args:
        log_dir_path (str): directory for dumped Noobaa DB

    """
    from ocs_ci.ocs.resources.pod import (
        get_pods_having_label,
        download_file_from_pod,
        Pod,
    )

    ocs_version = version.get_semantic_ocs_version_from_config()
    nb_db_label = (
        constants.NOOBAA_DB_LABEL_46_AND_UNDER
        if ocs_version < version.VERSION_4_7
        else constants.NOOBAA_DB_LABEL_47_AND_ABOVE
    )
    try:
        nb_db_pod = Pod(
            **get_pods_having_label(
                label=nb_db_label, namespace=defaults.ROOK_CLUSTER_NAMESPACE
            )[0]
        )
    except IndexError:
        log.warning(
            "Unable to find pod using label `%s` in namespace `%s`",
            nb_db_label,
            defaults.ROOK_CLUSTER_NAMESPACE,
        )
        return
    ocs_log_dir_path = os.path.join(log_dir_path, "noobaa_db_dump")
    create_directory_path(ocs_log_dir_path)
    ocs_log_dir_path = os.path.join(ocs_log_dir_path, "nbcore.gz")
    if ocs_version < version.VERSION_4_7:
        cmd = "mongodump --archive=nbcore.gz --gzip --db=nbcore"
    else:
        cmd = 'bash -c "pg_dump nbcore | gzip > nbcore.gz"'

    nb_db_pod.exec_cmd_on_pod(cmd)
    download_file_from_pod(
        pod_name=nb_db_pod.name,
        remotepath="/opt/app-root/src/nbcore.gz",
        localpath=ocs_log_dir_path,
        namespace=defaults.ROOK_CLUSTER_NAMESPACE,
    )
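For reference, a minimal usage sketch of collect_noobaa_db_dump follows. It is hypothetical: the import path (ocs_ci.ocs.utils) and the local log directory are assumptions, not taken from the examples above; the function itself writes the archive to <log_dir_path>/noobaa_db_dump/nbcore.gz as shown in the examples.

import os

# Assumed import path for collect_noobaa_db_dump; adjust it to wherever the
# function actually lives in your checkout of ocs-ci.
from ocs_ci.ocs.utils import collect_noobaa_db_dump

# Illustrative local directory for the dumped archive.
log_dir_path = os.path.expanduser("~/ocs-ci-logs")
os.makedirs(log_dir_path, exist_ok=True)

# Downloads the NooBaa DB dump to <log_dir_path>/noobaa_db_dump/nbcore.gz
collect_noobaa_db_dump(log_dir_path)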
Example #4
    def test_rbd_based_rwo_pvc(self, reclaim_policy):
        """
        Verifies RBD Based RWO Dynamic PVC creation with Reclaim policy set to
        Delete/Retain

        Steps:
        1. Create Storage Class with reclaimPolicy: Delete/Retain
        2. Create PVC with 'accessModes' 'ReadWriteOnce'
        3. Create two pods using same PVC
        4. Run IO on first pod
        5. Verify second pod is not getting into Running state
        6. Delete first pod
        7. Verify second pod is in Running state
        8. Verify usage of volume in second pod is matching with usage in
           first pod
        9. Run IO on second pod
        10. Delete second pod
        11. Delete PVC
        12. Verify PV associated with deleted PVC is also deleted/released
        """
        # Create Storage Class with reclaimPolicy: Delete
        sc_obj = helpers.create_storage_class(
            interface_type=constants.CEPHBLOCKPOOL,
            interface_name=self.cbp_obj.name,
            secret_name=self.rbd_secret_obj.name,
            reclaim_policy=reclaim_policy
        )

        # Create PVC with 'accessModes' 'ReadWriteOnce'
        pvc_data = templating.load_yaml_to_dict(constants.CSI_PVC_YAML)
        pvc_data['metadata']['name'] = helpers.create_unique_resource_name(
            'test', 'pvc'
        )
        pvc_data['metadata']['namespace'] = self.namespace
        pvc_data['spec']['storageClassName'] = sc_obj.name
        pvc_data['spec']['accessModes'] = ['ReadWriteOnce']
        pvc_obj = PVC(**pvc_data)
        pvc_obj.create()

        # Create first pod
        log.info(f"Creating two pods which use PVC {pvc_obj.name}")
        pod_data = templating.load_yaml_to_dict(constants.CSI_RBD_POD_YAML)
        pod_data['metadata']['name'] = helpers.create_unique_resource_name(
            'test', 'pod'
        )
        pod_data['metadata']['namespace'] = self.namespace
        pod_data['spec']['volumes'][0]['persistentVolumeClaim']['claimName'] = pvc_obj.name

        pod_obj = Pod(**pod_data)
        pod_obj.create()
        assert helpers.wait_for_resource_state(pod_obj, constants.STATUS_RUNNING)

        node_pod1 = pod_obj.get()['spec']['nodeName']

        # Create second pod
        # Try creating pod until it is on a different node than first pod
        for retry in range(1, 6):
            pod_data = templating.load_yaml_to_dict(constants.CSI_RBD_POD_YAML)
            pod_data['metadata']['name'] = helpers.create_unique_resource_name(
                'test', 'pod'
            )
            pod_data['metadata']['namespace'] = self.namespace
            pod_data['spec']['volumes'][0]['persistentVolumeClaim']['claimName'] = pvc_obj.name
            pod_obj2 = Pod(**pod_data)
            pod_obj2.create()
            assert helpers.wait_for_resource_state(pod_obj2, constants.STATUS_PENDING)

            node_pod2 = pod_obj2.get()['spec']['nodeName']
            if node_pod1 != node_pod2:
                break
            log.info(
                f"Both pods are on same node. Deleting second pod and "
                f"creating another pod. Retry count:{retry}"
            )
            pod_obj2.delete()
            if retry == 5:
                raise UnexpectedBehaviour(
                    "Second pod is always created on same node as of first "
                    "pod even after trying 5 times."
                )

        # Run IO on first pod
        log.info(f"Running IO on first pod {pod_obj.name}")
        pod_obj.run_io('fs', '1G')
        logging.info(f"Waiting for IO results from pod {pod_obj.name}")
        fio_result = pod_obj.get_fio_results()
        logging.info("IOPs after FIO:")
        logging.info(
            f"Read: {fio_result.get('jobs')[0].get('read').get('iops')}"
        )
        logging.info(
            f"Write: {fio_result.get('jobs')[0].get('write').get('iops')}"
        )

        # Fetch usage details
        mount_point = pod_obj.exec_cmd_on_pod(command="df -kh")
        mount_point = mount_point.split()
        usage = mount_point[mount_point.index('/var/lib/www/html') - 1]

        # Verify that second pod is not getting into Running state. Check it
        # for some period of time.
        try:
            assert not pod_obj2.ocp.wait_for_resource(
                condition='Running', resource_name=pod_obj2.name,
            ), "Unexpected: Second pod is in Running state"
        except TimeoutExpiredError:
            log.info(
                f"Verified: Second pod {pod_obj2.name} is not in "
                f"Running state"
            )

        # Delete first pod
        pod_obj.delete(wait=True)

        # Verify pod is deleted
        try:
            pod_obj.get()
            raise UnexpectedBehaviour(
                f"First pod {pod_obj.name} is not deleted."
            )
        except CommandFailed as exp:
            assert "not found" in str(exp), (
                "Failed to fetch pod details"
            )
            log.info(f"First pod {pod_obj.name} is deleted.")

        # Wait for second pod to be in Running state
        try:
            pod_obj2.ocp.wait_for_resource(
                condition='Running', resource_name=pod_obj2.name, timeout=180
            )
        except TimeoutExpiredError as exp:
            raise TimeoutExpiredError(
                f"Second pod {pod_obj2.name} is not in Running state "
                f"after deleting first pod."
            ) from exp
        log.info(
            f"Second pod {pod_obj2.name} is in Running state after "
            f"deleting the first pod."
        )

        # Verify that volume usage in second pod is matching with the usage in
        # first pod
        mount_point = pod_obj2.exec_cmd_on_pod(command="df -kh")
        mount_point = mount_point.split()
        usage_re = mount_point[mount_point.index('/var/lib/www/html') - 1]
        assert usage_re == usage, (
            "Use percentage in new pod is not matching with old pod"
        )

        # Run IO on second pod
        log.info(f"Running IO on second pod {pod_obj2.name}")
        pod_obj2.run_io('fs', '1G')
        logging.info(f"Waiting for IO results from pod {pod_obj2.name}")
        fio_result = pod_obj2.get_fio_results()
        logging.info("IOPs after FIO:")
        logging.info(
            f"Read: {fio_result.get('jobs')[0].get('read').get('iops')}"
        )
        logging.info(
            f"Write: {fio_result.get('jobs')[0].get('write').get('iops')}"
        )

        # Delete second pod
        pod_obj2.delete()

        # Verify pod is deleted
        try:
            pod_obj2.get()
            raise UnexpectedBehaviour(
                f"Second pod {pod_obj2.name} is not deleted."
            )
        except CommandFailed as exp:
            assert "not found" in str(exp), (
                "Failed to fetch pod details"
            )
            log.info(f"Second pod {pod_obj2.name} is deleted.")

        # Get PV name
        pvc_obj.reload()
        pv_name = pvc_obj.backed_pv

        # Delete PVC
        pvc_obj.delete()

        # Verify PVC is deleted
        try:
            pvc_obj.get()
            raise UnexpectedBehaviour(
                f"PVC {pvc_obj.name} is not deleted."
            )
        except CommandFailed as exp:
            assert "not found" in str(exp), (
                "Failed to verify PVC deletion."
            )
            log.info(f"PVC {pvc_obj.name} is deleted.")

        pv_obj = OCP(
            kind=constants.PV, namespace=self.namespace
        )

        if reclaim_policy == "Delete":
            # Verify PV is deleted
            for pv_info in TimeoutSampler(
                    30, 2, pv_obj.get, out_yaml_format=False
            ):
                if pv_name not in pv_info:
                    break
                log.warning(
                    f"PV {pv_name} exists after deleting PVC {pvc_obj.name}. "
                    f"Checking again."
                )

            # TODO: Verify PV using ceph toolbox. PV should be deleted.
            # Blocked by bz 1723656

        elif reclaim_policy == "Retain":
            # Wait for PV to be in Released state
            assert pv_obj.wait_for_resource(
                condition='Released', resource_name=pv_name
            )
            log.info(f"PV {pv_name} is in Released state")

            # TODO: Delete PV from backend and verify
            # Blocked by bz 1723656
            pv_obj.delete(resource_name=pv_name)

        # Delete Storage Class
        sc_obj.delete()
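The reclaim_policy argument in Example #4 is supplied by the test framework. A minimal sketch of how such a test might be parametrized with pytest is shown below; the class name and decorator are illustrative assumptions, and the string values simply mirror the "Delete"/"Retain" branches checked in the test body.

import pytest

class TestRbdBasedRwoPvc:
    # Hypothetical parametrization: runs the test once per reclaim policy.
    # The values match the strings the test body compares against.
    @pytest.mark.parametrize("reclaim_policy", ["Delete", "Retain"])
    def test_rbd_based_rwo_pvc(self, reclaim_policy):
        ...  # test body as in Example #4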