Example #1
def collect_noobaa_db_dump(log_dir_path):
    """
    Collect the Noobaa DB dump

    Args:
        log_dir_path (str): directory for dumped Noobaa DB

    """
    from ocs_ci.ocs.resources.pod import (
        get_pods_having_label,
        download_file_from_pod,
        Pod,
    )

    ocs_version = version.get_semantic_ocs_version_from_config()
    nb_db_label = (constants.NOOBAA_DB_LABEL_46_AND_UNDER
                   if ocs_version < version.VERSION_4_7 else
                   constants.NOOBAA_DB_LABEL_47_AND_ABOVE)
    try:
        nb_db_pod = Pod(**get_pods_having_label(
            label=nb_db_label, namespace=defaults.ROOK_CLUSTER_NAMESPACE)[0])
    except IndexError:
        log.warning(
            "Unable to find pod using label `%s` in namespace `%s`",
            nb_db_label,
            defaults.ROOK_CLUSTER_NAMESPACE,
        )
        return
    ocs_log_dir_path = os.path.join(log_dir_path, "noobaa_db_dump")
    create_directory_path(ocs_log_dir_path)
    ocs_log_dir_path = os.path.join(ocs_log_dir_path, "nbcore.gz")
    if ocs_version < version.VERSION_4_7:
        cmd = "mongodump --archive=nbcore.gz --gzip --db=nbcore"
    else:
        cmd = 'bash -c "pg_dump nbcore | gzip > nbcore.gz"'

    nb_db_pod.exec_cmd_on_pod(cmd)
    download_file_from_pod(
        pod_name=nb_db_pod.name,
        remotepath="/opt/app-root/src/nbcore.gz",
        localpath=ocs_log_dir_path,
        namespace=defaults.ROOK_CLUSTER_NAMESPACE,
    )
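A minimal usage sketch for the function above (hypothetical invocation; assumes a configured ocs-ci run environment with a reachable cluster):

import tempfile

# Hypothetical call site; collect_noobaa_db_dump is the function above.
log_dir = tempfile.mkdtemp(prefix="noobaa-db-")
collect_noobaa_db_dump(log_dir)
# On success, the dump is written to <log_dir>/noobaa_db_dump/nbcore.gz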
Example #2
def collect_noobaa_db_dump(log_dir_path):
    """
    Collect the Noobaa DB dump

    Args:
        log_dir_path (str): directory for dumped Noobaa DB

    """
    from ocs_ci.ocs.resources.pod import (
        get_pods_having_label,
        download_file_from_pod,
        Pod,
    )
    nb_db_pod = Pod(
        **get_pods_having_label(label=constants.NOOBAA_DB_LABEL,
                                namespace=defaults.ROOK_CLUSTER_NAMESPACE)[0])
    ocs_log_dir_path = os.path.join(log_dir_path, 'noobaa_db_dump')
    create_directory_path(ocs_log_dir_path)
    ocs_log_dir_path = os.path.join(ocs_log_dir_path, 'nbcore.gz')
    nb_db_pod.exec_cmd_on_pod(
        "mongodump --archive=nbcore.gz --gzip --db=nbcore")
    download_file_from_pod(pod_name=nb_db_pod.name,
                           remotepath="/opt/app-root/src/nbcore.gz",
                           localpath=ocs_log_dir_path,
                           namespace=defaults.ROOK_CLUSTER_NAMESPACE)
Example #3
File: utils.py Project: gitsridhar/ocs-ci
def collect_noobaa_db_dump(log_dir_path):
    """
    Collect the Noobaa DB dump

    Args:
        log_dir_path (str): directory for dumped Noobaa DB

    """
    from ocs_ci.ocs.resources.pod import (
        get_pods_having_label,
        download_file_from_pod,
        Pod,
    )

    nb_db_label = (
        constants.NOOBAA_DB_LABEL_46_AND_UNDER
        if float(ocsci_config.ENV_DATA["ocs_version"]) < 4.7
        else constants.NOOBAA_DB_LABEL_47_AND_ABOVE
    )
    nb_db_pod = Pod(
        **get_pods_having_label(
            label=nb_db_label, namespace=defaults.ROOK_CLUSTER_NAMESPACE
        )[0]
    )
    ocs_log_dir_path = os.path.join(log_dir_path, "noobaa_db_dump")
    create_directory_path(ocs_log_dir_path)
    ocs_log_dir_path = os.path.join(ocs_log_dir_path, "nbcore.gz")
    if float(ocsci_config.ENV_DATA["ocs_version"]) < 4.7:
        cmd = "mongodump --archive=nbcore.gz --gzip --db=nbcore"
    else:
        cmd = 'bash -c "pg_dump nbcore | gzip > nbcore.gz"'

    nb_db_pod.exec_cmd_on_pod(cmd)
    download_file_from_pod(
        pod_name=nb_db_pod.name,
        remotepath="/opt/app-root/src/nbcore.gz",
        localpath=ocs_log_dir_path,
        namespace=defaults.ROOK_CLUSTER_NAMESPACE,
    )
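Worth noting: the float() comparison in this variant breaks once the minor version reaches double digits ("4.10" parses as 4.1), which is presumably why Example #1 compares semantic versions instead. A quick self-contained demonstration:

# float-based comparison misorders double-digit minor versions:
assert float("4.10") < 4.7  # "4.10" parses as the float 4.1

# a tuple-based semantic comparison orders them correctly:
def semver(v):
    return tuple(int(part) for part in v.split("."))

assert semver("4.10") > semver("4.7")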
Example #4
    def pods(self):
        """
        Returns list of pods of the Deployment resource

        Returns:
            list: Deployment's pods
        """
        selectors = self.data.get("spec").get("selector").get("matchLabels")
        selectors = [f"{key}={selectors[key]}" for key in selectors.keys()]
        selectors_string = ",".join(selectors)
        return [
            Pod(**pod_data) for pod_data in get_pods_having_label(
                selectors_string, self.namespace)
        ]
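The selector handed to get_pods_having_label is simply the Deployment's matchLabels dict flattened into the comma-separated key=value form used by label selectors; a standalone illustration with made-up labels:

# Made-up matchLabels dict, for illustration only.
match_labels = {"app": "rook-ceph-tools", "release": "ocs"}
selectors_string = ",".join(f"{key}={value}" for key, value in match_labels.items())
assert selectors_string == "app=rook-ceph-tools,release=ocs"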
Example #5
def pod(request, pvc_factory, pod_factory, interface_iterate):
    """
    Creates a pod with git pre-installed in it and attaches a PVC to it.
    """
    pvc_obj = pvc_factory(interface=interface_iterate,
                          status=constants.STATUS_BOUND)
    pod_dict = templating.load_yaml(constants.CSI_CEPHFS_POD_YAML)
    # The image below is a Google-hosted mirror of hub.docker.com/library/alpine
    pod_dict["spec"]["containers"][0]["image"] = "mirror.gcr.io/library/alpine"
    pod_dict["spec"]["containers"][0]["command"] = [
        "sh",
        "-c",
        "mkdir -p /var/www/html && tail -f /dev/null",
    ]
    pod_dict["spec"]["volumes"][0]["persistentVolumeClaim"][
        "claimName"] = pvc_obj.name
    ocs_obj = pod_factory(custom_data=pod_dict,
                          interface=interface_iterate,
                          pvc=pvc_obj)
    pod_yaml = ocs_obj.get()
    pod = Pod(**pod_yaml)
    return pod
Example #6
File: mcg.py Project: pkalever/ocs-ci
    def __init__(self, *args, **kwargs):
        """
        Constructor for the MCG class
        """
        self.namespace = config.ENV_DATA["cluster_namespace"]
        self.operator_pod = Pod(**get_pods_having_label(
            constants.NOOBAA_OPERATOR_POD_LABEL, self.namespace)[0])
        self.core_pod = Pod(**get_pods_having_label(
            constants.NOOBAA_CORE_POD_LABEL, self.namespace)[0])

        self.retrieve_noobaa_cli_binary()
        """
        The certificate will be copied on each mcg_obj instantiation, since
        the process is so light and quick that the time required for the
        redundant copy is negligible compared to the time a hash comparison
        would take.
        """
        retrieve_default_ingress_crt()

        get_noobaa = OCP(kind="noobaa", namespace=self.namespace).get()

        self.s3_endpoint = (get_noobaa.get("items")[0].get("status").get(
            "services").get("serviceS3").get("externalDNS")[0])
        self.s3_internal_endpoint = (get_noobaa.get("items")[0].get(
            "status").get("services").get("serviceS3").get("internalDNS")[0])
        self.mgmt_endpoint = (get_noobaa.get("items")[0].get("status").get(
            "services").get("serviceMgmt").get("externalDNS")[0]) + "/rpc"
        self.region = config.ENV_DATA["region"]

        creds_secret_name = (get_noobaa.get("items")[0].get("status").get(
            "accounts").get("admin").get("secretRef").get("name"))
        secret_ocp_obj = OCP(kind="secret", namespace=self.namespace)
        creds_secret_obj = secret_ocp_obj.get(creds_secret_name)

        self.access_key_id = base64.b64decode(
            creds_secret_obj.get("data").get("AWS_ACCESS_KEY_ID")).decode(
                "utf-8")
        self.access_key = base64.b64decode(
            creds_secret_obj.get("data").get("AWS_SECRET_ACCESS_KEY")).decode(
                "utf-8")

        self.noobaa_user = base64.b64decode(
            creds_secret_obj.get("data").get("email")).decode("utf-8")
        self.noobaa_password = base64.b64decode(
            creds_secret_obj.get("data").get("password")).decode("utf-8")

        self.noobaa_token = self.retrieve_nb_token()

        self.s3_resource = boto3.resource(
            "s3",
            verify=retrieve_verification_mode(),
            endpoint_url=self.s3_endpoint,
            aws_access_key_id=self.access_key_id,
            aws_secret_access_key=self.access_key,
        )

        self.s3_client = self.s3_resource.meta.client

        if config.ENV_DATA["platform"].lower() == "aws" and kwargs.get(
                "create_aws_creds"):
            (
                self.cred_req_obj,
                self.aws_access_key_id,
                self.aws_access_key,
            ) = self.request_aws_credentials()

            self.aws_s3_resource = boto3.resource(
                "s3",
                endpoint_url="https://s3.amazonaws.com",
                aws_access_key_id=self.aws_access_key_id,
                aws_secret_access_key=self.aws_access_key,
            )

        if (config.ENV_DATA["platform"].lower() in constants.CLOUD_PLATFORMS
                or storagecluster_independent_check()):
            if not config.ENV_DATA["platform"] == constants.AZURE_PLATFORM and (
                    float(config.ENV_DATA["ocs_version"]) > 4.5):
                logger.info("Checking whether RGW pod is not present")
                pods = pod.get_pods_having_label(label=constants.RGW_APP_LABEL,
                                                 namespace=self.namespace)
                assert (
                    not pods
                ), "RGW pods should not exist in the current platform/cluster"

        elif config.ENV_DATA.get("platform") in constants.ON_PREM_PLATFORMS:
            rgw_count = get_rgw_count(config.ENV_DATA["ocs_version"],
                                      check_if_cluster_was_upgraded(), None)
            logger.info(
                f'Checking for RGW pod/s on {config.ENV_DATA.get("platform")} platform'
            )
            rgw_pod = OCP(kind=constants.POD, namespace=self.namespace)
            assert rgw_pod.wait_for_resource(
                condition=constants.STATUS_RUNNING,
                selector=constants.RGW_APP_LABEL,
                resource_count=rgw_count,
                timeout=60,
            )
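All of the credential handling above hinges on Kubernetes secrets storing their data base64-encoded; a self-contained sketch of the decode step with a dummy value:

import base64

# Dummy value standing in for creds_secret_obj["data"]["email"].
encoded_email = base64.b64encode(b"admin@noobaa.io").decode("utf-8")
noobaa_user = base64.b64decode(encoded_email).decode("utf-8")
assert noobaa_user == "admin@noobaa.io"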
Example #7
def patch_consumer_toolbox(ceph_admin_key=None):
    """
    Patch the rook-ceph-tools deployment with the ceph.admin key. Applicable
    to the MS platform only; it enables rook-ceph-tools to run Ceph commands.

    Args:
        ceph_admin_key (str): The ceph admin key which should be used to patch rook-ceph-tools deployment on consumer

    """

    # Get the admin key if available
    ceph_admin_key = (ceph_admin_key or os.environ.get("CEPHADMINKEY")
                      or config.AUTH.get("external", {}).get("ceph_admin_key"))

    if not ceph_admin_key:
        # TODO: Get the key from provider rook-ceph-tools pod after implementing multicluster deployment
        logger.warning(
            "Ceph admin key not found to patch rook-ceph-tools deployment on consumer with ceph.admin key. "
            "Skipping the step.")
        return

    consumer_tools_pod = get_ceph_tools_pod()

    # Check whether ceph command is working on tools pod. Patch is needed only if the error is "RADOS permission error"
    try:
        consumer_tools_pod.exec_ceph_cmd("ceph health")
        return
    except Exception as exc:
        if "RADOS permission error" not in str(exc):
            logger.warning(
                f"Ceph command on rook-ceph-tools deployment is failing with error {str(exc)}. "
                "This error cannot be fixed by patching the rook-ceph-tools deployment with ceph admin key."
            )
            return

    consumer_tools_deployment = ocp.OCP(
        kind=constants.DEPLOYMENT,
        namespace=defaults.ROOK_CLUSTER_NAMESPACE,
        resource_name="rook-ceph-tools",
    )
    patch_value = (
        f'[{{"op": "replace", "path": "/spec/template/spec/containers/0/env", '
        f'"value":[{{"name": "ROOK_CEPH_USERNAME", "value": "client.admin"}}, '
        f'{{"name": "ROOK_CEPH_SECRET", "value": "{ceph_admin_key}"}}]}}]')
    try:
        consumer_tools_deployment.patch(params=patch_value, format_type="json")
    except Exception as exe:
        logger.warning(
            "Failed to patch rook-ceph-tools deployment in consumer cluster. "
            f"The patch can be applied manually after deployment. Error {str(exe)}"
        )
        return

    # Wait for the existing tools pod to delete
    consumer_tools_pod.ocp.wait_for_delete(
        resource_name=consumer_tools_pod.name)

    # Wait for the new tools pod to reach Running state
    new_tools_pod_info = get_pods_having_label(
        label=constants.TOOL_APP_LABEL,
        namespace=defaults.ROOK_CLUSTER_NAMESPACE,
    )[0]
    new_tools_pod = Pod(**new_tools_pod_info)
    helpers.wait_for_resource_state(new_tools_pod, constants.STATUS_RUNNING)
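The hand-escaped f-string patch above is easy to get wrong; a hypothetical alternative builds the same JSON-patch payload with json.dumps (dummy key shown):

import json

ceph_admin_key = "<dummy-admin-key>"  # stands in for the real admin key
patch_value = json.dumps([
    {
        "op": "replace",
        "path": "/spec/template/spec/containers/0/env",
        "value": [
            {"name": "ROOK_CEPH_USERNAME", "value": "client.admin"},
            {"name": "ROOK_CEPH_SECRET", "value": ceph_admin_key},
        ],
    }
])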
Example #8
    def __init__(self, *args, **kwargs):
        """
        Constructor for the MCG class
        """
        self.namespace = config.ENV_DATA['cluster_namespace']
        self.operator_pod = Pod(**get_pods_having_label(
            constants.NOOBAA_OPERATOR_POD_LABEL, self.namespace)[0])
        self.core_pod = Pod(**get_pods_having_label(
            constants.NOOBAA_CORE_POD_LABEL, self.namespace)[0])

        self.retrieve_noobaa_cli_binary()
        """
        The certificate will be copied on each mcg_obj instantiation, since
        the process is so light and quick that the time required for the
        redundant copy is negligible compared to the time a hash comparison
        would take.
        """
        retrieve_default_ingress_crt()

        get_noobaa = OCP(kind='noobaa', namespace=self.namespace).get()

        self.s3_endpoint = (get_noobaa.get('items')[0].get('status').get(
            'services').get('serviceS3').get('externalDNS')[0])
        self.s3_internal_endpoint = (get_noobaa.get('items')[0].get(
            'status').get('services').get('serviceS3').get('internalDNS')[0])
        self.mgmt_endpoint = (get_noobaa.get('items')[0].get('status').get(
            'services').get('serviceMgmt').get('externalDNS')[0]) + '/rpc'
        self.region = config.ENV_DATA['region']

        creds_secret_name = (get_noobaa.get('items')[0].get('status').get(
            'accounts').get('admin').get('secretRef').get('name'))
        secret_ocp_obj = OCP(kind='secret', namespace=self.namespace)
        creds_secret_obj = secret_ocp_obj.get(creds_secret_name)

        self.access_key_id = base64.b64decode(
            creds_secret_obj.get('data').get('AWS_ACCESS_KEY_ID')).decode(
                'utf-8')
        self.access_key = base64.b64decode(
            creds_secret_obj.get('data').get('AWS_SECRET_ACCESS_KEY')).decode(
                'utf-8')

        self.noobaa_user = base64.b64decode(
            creds_secret_obj.get('data').get('email')).decode('utf-8')
        self.noobaa_password = base64.b64decode(
            creds_secret_obj.get('data').get('password')).decode('utf-8')

        self.noobaa_token = self.send_rpc_query(
            'auth_api',
            'create_auth',
            params={
                'role': 'admin',
                'system': 'noobaa',
                'email': self.noobaa_user,
                'password': self.noobaa_password
            }).json().get('reply').get('token')

        self.s3_resource = boto3.resource(
            's3',
            verify=constants.DEFAULT_INGRESS_CRT_LOCAL_PATH,
            endpoint_url=self.s3_endpoint,
            aws_access_key_id=self.access_key_id,
            aws_secret_access_key=self.access_key)

        self.s3_client = self.s3_resource.meta.client

        if (config.ENV_DATA['platform'].lower() == 'aws'
                and kwargs.get('create_aws_creds')):
            (self.cred_req_obj, self.aws_access_key_id,
             self.aws_access_key) = self.request_aws_credentials()

            self.aws_s3_resource = boto3.resource(
                's3',
                endpoint_url="https://s3.amazonaws.com",
                aws_access_key_id=self.aws_access_key_id,
                aws_secret_access_key=self.aws_access_key)
            logger.info(
                'Verifying that no RGW pod is present on the AWS platform')
            pods = pod.get_pods_having_label(label=constants.RGW_APP_LABEL,
                                             namespace=self.namespace)
            assert len(pods) == 0, 'RGW pod should not exist on AWS platform'

        elif config.ENV_DATA.get('platform') in constants.ON_PREM_PLATFORMS:
            rgw_count = 2 if float(
                config.ENV_DATA['ocs_version']) >= 4.5 else 1
            logger.info(
                f'Checking for RGW pod/s on {config.ENV_DATA.get("platform")} platform'
            )
            rgw_pod = OCP(kind=constants.POD, namespace=self.namespace)
            assert rgw_pod.wait_for_resource(
                condition=constants.STATUS_RUNNING,
                selector=constants.RGW_APP_LABEL,
                resource_count=rgw_count,
                timeout=60)
Example #9
    def __init__(self):
        """
        Constructor for the MCG class
        """

        # Todo: find a better solution for not being able to verify requests with a self-signed cert
        logger.warning('Suppressing InsecureRequestWarnings')
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

        self.namespace = config.ENV_DATA['cluster_namespace']
        ocp_obj = OCP(kind='noobaa', namespace=self.namespace)
        results = ocp_obj.get()
        self.s3_endpoint = (results.get('items')[0].get('status').get(
            'services').get('serviceS3').get('externalDNS')[-1])
        self.mgmt_endpoint = (results.get('items')[0].get('status').get(
            'services').get('serviceMgmt').get('externalDNS')[-1]) + '/rpc'
        self.region = config.ENV_DATA['region']

        creds_secret_name = (results.get('items')[0].get('status').get(
            'accounts').get('admin').get('secretRef').get('name'))
        secret_ocp_obj = OCP(kind='secret', namespace=self.namespace)
        creds_secret_obj = secret_ocp_obj.get(creds_secret_name)

        self.access_key_id = base64.b64decode(
            creds_secret_obj.get('data').get('AWS_ACCESS_KEY_ID')).decode(
                'utf-8')
        self.access_key = base64.b64decode(
            creds_secret_obj.get('data').get('AWS_SECRET_ACCESS_KEY')).decode(
                'utf-8')

        self.noobaa_user = base64.b64decode(
            creds_secret_obj.get('data').get('email')).decode('utf-8')
        self.noobaa_password = base64.b64decode(
            creds_secret_obj.get('data').get('password')).decode('utf-8')

        self.noobaa_token = self.send_rpc_query(
            'auth_api',
            'create_auth',
            params={
                'role': 'admin',
                'system': 'noobaa',
                'email': self.noobaa_user,
                'password': self.noobaa_password
            }).json().get('reply').get('token')

        self.s3_resource = boto3.resource(
            's3',
            verify=False,
            endpoint_url=self.s3_endpoint,
            aws_access_key_id=self.access_key_id,
            aws_secret_access_key=self.access_key)

        self.s3_client = boto3.client('s3',
                                      verify=False,
                                      endpoint_url=self.s3_endpoint,
                                      aws_access_key_id=self.access_key_id,
                                      aws_secret_access_key=self.access_key)

        # Give NooBaa's ServiceAccount permissions in order to execute CLI commands
        registry.add_role_to_user('cluster-admin',
                                  constants.NOOBAA_SERVICE_ACCOUNT,
                                  cluster_role=True)

        self.operator_pod = Pod(**get_pods_having_label(
            constants.NOOBAA_OPERATOR_POD_LABEL, self.namespace)[0])

        if config.ENV_DATA['platform'].lower() == 'aws':
            (self.cred_req_obj, self.aws_access_key_id,
             self.aws_access_key) = self.request_aws_credentials()

            self._ocp_resource = ocp_obj

            self.aws_s3_resource = boto3.resource(
                's3',
                verify=False,
                endpoint_url="https://s3.amazonaws.com",
                aws_access_key_id=self.aws_access_key_id,
                aws_secret_access_key=self.aws_access_key)
            logger.info(
                'Verifying that no RGW pod is present on the AWS platform')
            pods = pod.get_pods_having_label(label=constants.RGW_APP_LABEL,
                                             namespace=self.namespace)
            assert len(pods) == 0, 'RGW pod should not exist on AWS platform'

        elif config.ENV_DATA.get('platform') == constants.VSPHERE_PLATFORM:
            logger.info('Checking for RGW pod on VSPHERE platform')
            rgw_pod = OCP(kind=constants.POD, namespace=self.namespace)
            assert rgw_pod.wait_for_resource(
                condition=constants.STATUS_RUNNING,
                selector=constants.RGW_APP_LABEL,
                resource_count=1,
                timeout=60)
Example #10
    def __init__(self, *args, **kwargs):
        """
        Constructor for the MCG class
        """
        self.namespace = config.ENV_DATA["cluster_namespace"]
        self.operator_pod = Pod(**get_pods_having_label(
            constants.NOOBAA_OPERATOR_POD_LABEL, self.namespace)[0])
        self.core_pod = Pod(**get_pods_having_label(
            constants.NOOBAA_CORE_POD_LABEL, self.namespace)[0])
        wait_for_resource_state(resource=self.operator_pod,
                                state=constants.STATUS_RUNNING,
                                timeout=300)
        self.retrieve_noobaa_cli_binary()
        """
        The certificate will be copied on each mcg_obj instantiation, since
        the process is so light and quick that the time required for the
        redundant copy is negligible compared to the time a hash comparison
        would take.
        """
        retrieve_default_ingress_crt()

        get_noobaa = OCP(kind="noobaa", namespace=self.namespace).get()

        self.s3_endpoint = (get_noobaa.get("items")[0].get("status").get(
            "services").get("serviceS3").get("externalDNS")[0])
        self.s3_internal_endpoint = (get_noobaa.get("items")[0].get(
            "status").get("services").get("serviceS3").get("internalDNS")[0])
        self.mgmt_endpoint = (get_noobaa.get("items")[0].get("status").get(
            "services").get("serviceMgmt").get("externalDNS")[0]) + "/rpc"
        self.region = config.ENV_DATA["region"]

        creds_secret_name = (get_noobaa.get("items")[0].get("status").get(
            "accounts").get("admin").get("secretRef").get("name"))
        secret_ocp_obj = OCP(kind="secret", namespace=self.namespace)
        creds_secret_obj = secret_ocp_obj.get(creds_secret_name)

        self.access_key_id = base64.b64decode(
            creds_secret_obj.get("data").get("AWS_ACCESS_KEY_ID")).decode(
                "utf-8")
        self.access_key = base64.b64decode(
            creds_secret_obj.get("data").get("AWS_SECRET_ACCESS_KEY")).decode(
                "utf-8")

        self.noobaa_user = base64.b64decode(
            creds_secret_obj.get("data").get("email")).decode("utf-8")
        self.noobaa_password = base64.b64decode(
            creds_secret_obj.get("data").get("password")).decode("utf-8")

        self.noobaa_token = self.retrieve_nb_token()

        self.s3_resource = boto3.resource(
            "s3",
            verify=retrieve_verification_mode(),
            endpoint_url=self.s3_endpoint,
            aws_access_key_id=self.access_key_id,
            aws_secret_access_key=self.access_key,
        )

        self.s3_client = self.s3_resource.meta.client

        if config.ENV_DATA["platform"].lower() == "aws" and kwargs.get(
                "create_aws_creds"):
            (
                self.cred_req_obj,
                self.aws_access_key_id,
                self.aws_access_key,
            ) = self.request_aws_credentials()

            self.aws_s3_resource = boto3.resource(
                "s3",
                endpoint_url="https://s3.amazonaws.com",
                aws_access_key_id=self.aws_access_key_id,
                aws_secret_access_key=self.aws_access_key,
            )
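For reference, the boto3 wiring shared by these MCG constructors reduces to the sketch below (hypothetical endpoint and credentials; the verify argument mirrors the certificate handling above):

import boto3

s3_resource = boto3.resource(
    "s3",
    verify=False,  # or a CA bundle path, per retrieve_verification_mode()
    endpoint_url="https://s3-openshift-storage.apps.example.com",  # hypothetical
    aws_access_key_id="<ACCESS_KEY_ID>",
    aws_secret_access_key="<SECRET_ACCESS_KEY>",
)
s3_client = s3_resource.meta.client  # low-level client from the same session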
Example #11
    def test_pvpool_cpu_and_memory_modifications(
        self,
        awscli_pod_session,
        backingstore_factory,
        bucket_factory,
        test_directory_setup,
        mcg_obj_session,
    ):
        """
        Test modifying the CPU and memory resource limits of the PV
        backingstore (BS) and verify that the change is reflected.
        """
        bucketclass_dict = {
            "interface": "OC",
            "backingstore_dict": {
                "pv": [(
                    1,
                    MIN_PV_BACKINGSTORE_SIZE_IN_GB,
                    "ocs-storagecluster-ceph-rbd",
                )]
            },
        }
        bucket = bucket_factory(1, "OC", bucketclass=bucketclass_dict)[0]
        bucket_name = bucket.name
        pv_backingstore = bucket.bucketclass.backingstores[0]
        pv_bs_name = pv_backingstore.name
        pv_pod_label = f"pool={pv_bs_name}"
        pv_pod_info = get_pods_having_label(
            label=pv_pod_label,
            namespace=config.ENV_DATA["cluster_namespace"])[0]
        pv_pod_obj = Pod(**pv_pod_info)
        pv_pod_name = pv_pod_obj.name
        logger.info(
            f"Pod created for PV Backingstore {pv_bs_name}: {pv_pod_name}")
        new_cpu = "500m"
        new_mem = "500Mi"
        new_resource_patch = {
            "spec": {
                "pvPool": {
                    "resources": {
                        "limits": {
                            "cpu": f"{new_cpu}",
                            "memory": f"{new_mem}",
                        },
                        "requests": {
                            "cpu": f"{new_cpu}",
                            "memory": f"{new_mem}",
                        },
                    }
                }
            }
        }
        try:
            OCP(
                namespace=config.ENV_DATA["cluster_namespace"],
                kind="backingstore",
                resource_name=pv_bs_name,
            ).patch(params=json.dumps(new_resource_patch), format_type="merge")
        except CommandFailed as e:
            logger.error(f"[ERROR] Failed to patch: {e}")
        else:
            logger.info("Patched new resource limits")
        wait_for_pods_to_be_running(
            namespace=config.ENV_DATA["cluster_namespace"],
            pod_names=[pv_pod_name])
        pv_pod_ocp_obj = OCP(namespace=config.ENV_DATA["cluster_namespace"],
                             kind="pod").get(resource_name=pv_pod_name)
        resource_dict = pv_pod_ocp_obj["spec"]["containers"][0]["resources"]
        assert (
            resource_dict["limits"]["cpu"] == new_cpu
            and resource_dict["limits"]["memory"] == new_mem
            and resource_dict["requests"]["cpu"] == new_cpu
            and resource_dict["requests"]["memory"] == new_mem
        ), "New resource modification in Backingstore is not reflected in PV Backingstore Pod!!"
        logger.info(
            "Resource modification reflected in the PV Backingstore Pod!!")

        # push some data to the bucket
        file_dir = test_directory_setup.origin_dir
        copy_random_individual_objects(
            podobj=awscli_pod_session,
            file_dir=file_dir,
            target=f"s3://{bucket_name}",
            amount=1,
            s3_obj=OBC(bucket_name),
        )
Example #12
    def test_rbd_based_rwo_pvc(self, reclaim_policy):
        """
        Verifies RBD Based RWO Dynamic PVC creation with Reclaim policy set to
        Delete/Retain

        Steps:
        1. Create Storage Class with reclaimPolicy: Delete/Retain
        2. Create PVC with 'accessModes' 'ReadWriteOnce'
        3. Create two pods using same PVC
        4. Run IO on first pod
        5. Verify second pod is not getting into Running state
        6. Delete first pod
        7. Verify second pod is in Running state
        8. Verify usage of volume in second pod is matching with usage in
           first pod
        9. Run IO on second pod
        10. Delete second pod
        11. Delete PVC
        12. Verify PV associated with deleted PVC is also deleted/released
        """
        # Create Storage Class with reclaimPolicy: Delete
        sc_obj = helpers.create_storage_class(
            interface_type=constants.CEPHBLOCKPOOL,
            interface_name=self.cbp_obj.name,
            secret_name=self.rbd_secret_obj.name,
            reclaim_policy=reclaim_policy
        )

        # Create PVC with 'accessModes' 'ReadWriteOnce'
        pvc_data = templating.load_yaml_to_dict(constants.CSI_PVC_YAML)
        pvc_data['metadata']['name'] = helpers.create_unique_resource_name(
            'test', 'pvc'
        )
        pvc_data['metadata']['namespace'] = self.namespace
        pvc_data['spec']['storageClassName'] = sc_obj.name
        pvc_data['spec']['accessModes'] = ['ReadWriteOnce']
        pvc_obj = PVC(**pvc_data)
        pvc_obj.create()

        # Create first pod
        log.info(f"Creating two pods which use PVC {pvc_obj.name}")
        pod_data = templating.load_yaml_to_dict(constants.CSI_RBD_POD_YAML)
        pod_data['metadata']['name'] = helpers.create_unique_resource_name(
            'test', 'pod'
        )
        pod_data['metadata']['namespace'] = self.namespace
        pod_data['spec']['volumes'][0]['persistentVolumeClaim']['claimName'] = pvc_obj.name

        pod_obj = Pod(**pod_data)
        pod_obj.create()
        assert helpers.wait_for_resource_state(pod_obj, constants.STATUS_RUNNING)

        node_pod1 = pod_obj.get()['spec']['nodeName']

        # Create second pod
        # Try creating pod until it is on a different node than first pod
        for retry in range(1, 6):
            pod_data = templating.load_yaml_to_dict(constants.CSI_RBD_POD_YAML)
            pod_data['metadata']['name'] = helpers.create_unique_resource_name(
                'test', 'pod'
            )
            pod_data['metadata']['namespace'] = self.namespace
            pod_data['spec']['volumes'][0]['persistentVolumeClaim']['claimName'] = pvc_obj.name
            pod_obj2 = Pod(**pod_data)
            pod_obj2.create()
            assert helpers.wait_for_resource_state(pod_obj2, constants.STATUS_PENDING)

            node_pod2 = pod_obj2.get()['spec']['nodeName']
            if node_pod1 != node_pod2:
                break
            log.info(
                f"Both pods are on the same node. Deleting the second pod "
                f"and creating another. Retry count: {retry}"
            )
            pod_obj2.delete()
            if retry == 5:
                raise UnexpectedBehaviour(
                    "Second pod was scheduled on the same node as the first "
                    "pod even after 5 retries."
                )

        # Run IO on first pod
        log.info(f"Running IO on first pod {pod_obj.name}")
        pod_obj.run_io('fs', '1G')
        logging.info(f"Waiting for IO results from pod {pod_obj.name}")
        fio_result = pod_obj.get_fio_results()
        logging.info("IOPs after FIO:")
        logging.info(
            f"Read: {fio_result.get('jobs')[0].get('read').get('iops')}"
        )
        logging.info(
            f"Write: {fio_result.get('jobs')[0].get('write').get('iops')}"
        )

        # Fetch usage details
        mount_point = pod_obj.exec_cmd_on_pod(command="df -kh")
        mount_point = mount_point.split()
        usage = mount_point[mount_point.index('/var/lib/www/html') - 1]

        # Verify that second pod is not getting into Running state. Check it
        # for some period of time.
        try:
            assert not pod_obj2.ocp.wait_for_resource(
                condition='Running', resource_name=pod_obj2.name,
            ), "Unexpected: Second pod is in Running state"
        except TimeoutExpiredError:
            log.info(
                f"Verified: Second pod {pod_obj2.name} is not in "
                f"Running state"
            )

        # Delete first pod
        pod_obj.delete(wait=True)

        # Verify pod is deleted
        try:
            pod_obj.get()
            raise UnexpectedBehaviour(
                f"First pod {pod_obj.name} is not deleted."
            )
        except CommandFailed as exp:
            assert "not found" in str(exp), (
                "Failed to fetch pod details"
            )
            log.info(f"First pod {pod_obj.name} is deleted.")

        # Wait for second pod to be in Running state
        try:
            pod_obj2.ocp.wait_for_resource(
                condition='Running', resource_name=pod_obj2.name, timeout=180
            )
        except TimeoutExpiredError as exp:
            raise TimeoutExpiredError(
                f"Second pod {pod_obj2.name} is not in Running state "
                f"after deleting first pod."
            ) from exp
        log.info(
            f"Second pod {pod_obj2.name} is in Running state after "
            f"deleting the first pod."
        )

        # Verify that volume usage in second pod is matching with the usage in
        # first pod
        mount_point = pod_obj2.exec_cmd_on_pod(command="df -kh")
        mount_point = mount_point.split()
        usage_re = mount_point[mount_point.index('/var/lib/www/html') - 1]
        assert usage_re == usage, (
            "Use percentage in new pod is not matching with old pod"
        )

        # Run IO on second pod
        log.info(f"Running IO on second pod {pod_obj2.name}")
        pod_obj2.run_io('fs', '1G')
        logging.info(f"Waiting for IO results from pod {pod_obj2.name}")
        fio_result = pod_obj2.get_fio_results()
        logging.info("IOPs after FIO:")
        logging.info(
            f"Read: {fio_result.get('jobs')[0].get('read').get('iops')}"
        )
        logging.info(
            f"Write: {fio_result.get('jobs')[0].get('write').get('iops')}"
        )

        # Delete second pod
        pod_obj2.delete()

        # Verify pod is deleted
        try:
            pod_obj2.get()
            raise UnexpectedBehaviour(
                f"Second pod {pod_obj2.name} is not deleted."
            )
        except CommandFailed as exp:
            assert "not found" in str(exp), (
                "Failed to fetch pod details"
            )
            log.info(f"Second pod {pod_obj2.name} is deleted.")

        # Get PV name
        pvc_obj.reload()
        pv_name = pvc_obj.backed_pv

        # Delete PVC
        pvc_obj.delete()

        # Verify PVC is deleted
        try:
            pvc_obj.get()
            raise UnexpectedBehaviour(
                f"PVC {pvc_obj.name} is not deleted."
            )
        except CommandFailed as exp:
            assert "not found" in str(exp), (
                "Failed to verify PVC deletion."
            )
            log.info(f"PVC {pvc_obj.name} is deleted.")

        pv_obj = OCP(
            kind=constants.PV, namespace=self.namespace
        )

        if reclaim_policy == "Delete":
            # Verify PV is deleted
            for pv_info in TimeoutSampler(
                    30, 2, pv_obj.get, out_yaml_format=False
            ):
                if pv_name not in pv_info:
                    break
                log.warning(
                    f"PV {pv_name} exists after deleting PVC {pvc_obj.name}. "
                    f"Checking again."
                )

            # TODO: Verify PV using ceph toolbox. PV should be deleted.
            # Blocked by bz 1723656

        elif reclaim_policy == "Retain":
            # Wait for PV to be in Released state
            assert pv_obj.wait_for_resource(
                condition='Released', resource_name=pv_name
            )
            log.info(f"PV {pv_name} is in Released state")

            # TODO: Delete PV from backend and verify
            # Blocked by bz 1723656
            pv_obj.delete(resource_name=pv_name)

        # Delete Storage Class
        sc_obj.delete()
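The usage comparison in this test tokenizes raw df -kh output and takes the field just before the mount point; a standalone sketch against sample output:

# Sample output standing in for pod_obj.exec_cmd_on_pod(command="df -kh").
df_output = (
    "Filesystem  Size  Used Avail Use% Mounted on\n"
    "/dev/rbd0   976M  2.6M  958M   1% /var/lib/www/html"
)
tokens = df_output.split()
usage = tokens[tokens.index("/var/lib/www/html") - 1]
assert usage == "1%"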
Example #13
def create_fio_pod(
    project,
    interface,
    pvc_factory,
    storageclass,
    access_mode,
    fio_job_dict,
    fio_configmap_dict,
    tmp_path,
    volume_mode=None,
    pvc_size=10,
):
    """
    Create pods for upgrade testing.

    Args:
        project (obj): Project in which to create resources
        interface (str): CephBlockPool or CephFileSystem
        pvc_factory (function): Function for creating PVCs
        storageclass (obj): Storageclass to use
        access_mode (str): ReadWriteOnce, ReadOnlyMany or ReadWriteMany.
            This decides the access mode to be used for the PVC
        fio_job_dict (dict): fio job dictionary to use
        fio_configmap_dict (dict): fio configmap dictionary to use
        tmp_path (obj): reference to tmp_path fixture object
        volume_mode (str): Volume mode for rbd RWO PVC
        pvc_size (int): Size of PVC in GiB

    Returns:
        Pod: The pod attached to the newly created PVC, running the fio Job

    """
    log.info(f"Creating pod via {interface} using {access_mode}"
             f" access mode, {volume_mode} volume mode and {storageclass.name}"
             f" storageclass")
    pvc = pvc_factory(
        project=project,
        storageclass=storageclass,
        access_mode=access_mode,
        volume_mode=volume_mode,
        size=pvc_size,
        status=None,
    )
    helpers.wait_for_resource_state(pvc, constants.STATUS_BOUND, timeout=600)

    job_volume = fio_job_dict["spec"]["template"]["spec"]["volumes"][0]
    job_volume["persistentVolumeClaim"]["claimName"] = pvc.name
    fio_objs = [fio_configmap_dict, fio_job_dict]
    job_file = ObjectConfFile("fio_continuous", fio_objs, project, tmp_path)

    # deploy the Job to the cluster and start it
    job_file.create()

    ocp_pod_obj = ocp.OCP(kind=constants.POD, namespace=project.namespace)
    pods = ocp_pod_obj.get()["items"]
    pod_data = None
    for pod in pods:
        pod_volume = pod["spec"]["volumes"][0]
        if pod_volume["persistentVolumeClaim"]["claimName"] == pvc.name:
            pod_data = pod
            break
    # Guard against a NameError if no pod is using the PVC yet.
    assert pod_data is not None, f"No pod found using PVC {pvc.name}"

    return Pod(**pod_data)
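The PVC-to-pod matching at the end operates on plain dicts; a standalone sketch with minimal fake pod data:

# Minimal fake pod listing, shaped like ocp_pod_obj.get()["items"].
pods = [
    {"metadata": {"name": "fio-abc12"},
     "spec": {"volumes": [{"persistentVolumeClaim": {"claimName": "pvc-1"}}]}},
    {"metadata": {"name": "fio-def34"},
     "spec": {"volumes": [{"persistentVolumeClaim": {"claimName": "pvc-2"}}]}},
]
target_pvc = "pvc-2"
pod_data = next(
    p for p in pods
    if p["spec"]["volumes"][0]["persistentVolumeClaim"]["claimName"] == target_pvc
)
assert pod_data["metadata"]["name"] == "fio-def34"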