Example #1
    def get(self, **kwargs):
        """
        Overridden get method from the OCP class.

        Raises:
            ResourceNotFoundError: In case the selector and resource_name
                are specified and no such resource is found.
        """
        resource_name = kwargs.get("resource_name", "")
        resource_name = resource_name if resource_name else self.resource_name
        selector = kwargs.get('selector')
        selector = selector if selector else self.selector

        data = super(PackageManifest, self).get(**kwargs)
        if isinstance(data, dict) and data.get('kind') == 'List':
            items = data['items']
            data_len = len(items)
            if data_len == 0 and selector and resource_name:
                raise ResourceNotFoundError(
                    f"Requested packageManifest: {resource_name} with "
                    f"selector: {selector} not found!")
            if data_len == 1:
                return items[0]
            if data_len > 1 and resource_name:
                items_match_name = [
                    i for i in items if i['metadata']['name'] == resource_name
                ]
                if len(items_match_name) == 1:
                    return items_match_name[0]
                else:
                    return items_match_name
        return data
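
A minimal usage sketch follows; it assumes PackageManifest can be constructed with resource_name and selector keyword arguments, which the fallback to self.resource_name and self.selector above implies but does not show.

# Hypothetical usage: the values below are illustrative, not taken from a real cluster.
pm = PackageManifest(resource_name="ocs-operator", selector="catalog=ocs-catalogsource")
manifest = pm.get()  # single dict when exactly one item matches, otherwise a list or raw data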
Example #2
def delete_machine_and_check_state_of_new_spinned_machine(machine_name):
    """
    Deletes a machine and checks the state of the newly spun-up
    machine

    Args:
        machine_name (str): Name of the machine you want to delete

    Returns:
        str: Name of the new machine

    Raises:
        ResourceNotFoundError: In case machine creation failed

    """
    machine_type = get_machine_type(machine_name)
    machine_list = get_machines(machine_type=machine_type)
    initial_machine_names = [machine.name for machine in machine_list]
    delete_machine(machine_name)
    new_machine_list = get_machines(machine_type=machine_type)
    new_machine = [
        machine for machine in new_machine_list
        if machine.name not in initial_machine_names
    ]
    if new_machine:
        new_machine_name = new_machine[0].name
        log.info(
            f"Checking the state of new spinned machine {new_machine_name}")
        new_machine[0].ocp.wait_for_resource(
            condition=constants.STATUS_RUNNING,
            resource_name=new_machine_name,
            column="PHASE",
            timeout=600,
            sleep=30,
        )
        log.info(f"{new_machine_name} is in {constants.STATUS_RUNNING} state")
        return new_machine_name
    else:
        raise ResourceNotFoundError("New Machine resource not found")
Example #3
def create_custom_machineset(
    role="app",
    instance_type="m4.xlarge",
    labels=None,
    taints=None,
    zone="a",
):
    """
    Function to create a custom machineset. Works only for AWS,
    i.e. using this, a user can create nodes with a different instance type and role.
    https://docs.openshift.com/container-platform/4.1/machine_management/creating-machineset.html

    Args:
        role (str): Role type to be added for the node, e.g. app, worker
        instance_type (str): Type of AWS instance
        labels (list): List of labels (key, val) to be added to the node
        taints (list): List of taints to be applied
        zone (str): Machineset zone for node creation.

    Returns:
        machineset (str): Created machineset name

    Raises:
        ResourceNotFoundError: In case machineset creation failed
        UnsupportedPlatformError: In case of wrong platform

    """
    # check for platform, since it's supported only for IPI
    if config.ENV_DATA["deployment_type"] == "ipi":
        machinesets_obj = OCP(
            kind=constants.MACHINESETS,
            namespace=constants.OPENSHIFT_MACHINE_API_NAMESPACE,
        )
        for machine in machinesets_obj.get()["items"]:
            # Get inputs from existing machineset config.
            region = (machine.get("spec").get("template").get("spec").get(
                "providerSpec").get("value").get("placement").get("region"))
            aws_zone = (machine.get("spec").get("template").get("spec").get(
                "providerSpec").get("value").get("placement").get(
                    "availabilityZone"))
            cls_id = (machine.get("spec").get("selector").get(
                "matchLabels").get("machine.openshift.io/cluster-api-cluster"))
            ami_id = (machine.get("spec").get("template").get("spec").get(
                "providerSpec").get("value").get("ami").get("id"))
            if aws_zone == f"{region}{zone}":
                machineset_yaml = templating.load_yaml(
                    constants.MACHINESET_YAML)

                # Update machineset_yaml with required values.
                machineset_yaml["metadata"]["labels"][
                    "machine.openshift.io/cluster-api-cluster"] = cls_id
                machineset_yaml["metadata"][
                    "name"] = f"{cls_id}-{role}-{aws_zone}"
                machineset_yaml["spec"]["selector"]["matchLabels"][
                    "machine.openshift.io/cluster-api-cluster"] = cls_id
                machineset_yaml["spec"]["selector"]["matchLabels"][
                    "machine.openshift.io/cluster-api-machineset"] = f"{cls_id}-{role}-{aws_zone}"
                machineset_yaml["spec"]["template"]["metadata"]["labels"][
                    "machine.openshift.io/cluster-api-cluster"] = cls_id
                machineset_yaml["spec"]["template"]["metadata"]["labels"][
                    "machine.openshift.io/cluster-api-machine-role"] = role
                machineset_yaml["spec"]["template"]["metadata"]["labels"][
                    "machine.openshift.io/cluster-api-machine-type"] = role
                machineset_yaml["spec"]["template"]["metadata"]["labels"][
                    "machine.openshift.io/cluster-api-machineset"] = f"{cls_id}-{role}-{aws_zone}"
                machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                    "value"]["ami"]["id"] = ami_id
                machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                    "value"]["iamInstanceProfile"][
                        "id"] = f"{cls_id}-worker-profile"
                machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                    "value"]["instanceType"] = instance_type
                machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                    "value"]["placement"]["availabilityZone"] = aws_zone
                machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                    "value"]["placement"]["region"] = region
                machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                    "value"]["securityGroups"][0]["filters"][0]["values"][
                        0] = f"{cls_id}-worker-sg"
                machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                    "value"]["subnet"]["filters"][0]["values"][
                        0] = f"{cls_id}-private-{aws_zone}"
                machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                    "value"]["tags"][0][
                        "name"] = f"kubernetes.io/cluster/{cls_id}"

                # Apply the labels
                if labels:
                    for label in labels:
                        machineset_yaml["spec"]["template"]["spec"][
                            "metadata"]["labels"][label[0]] = label[1]

                # Apply the Taints
                # ex taint list looks like:
                # [ {'effect': 'NoSchedule',
                #    'key': 'node.ocs.openshift.io/storage',
                #    'value': 'true',
                #  }, {'effect': 'Schedule', 'key': 'xyz', 'value': 'False'} ]
                if taints:
                    machineset_yaml["spec"]["template"]["spec"].update(
                        {"taints": taints})

                # Create new custom machineset
                ms_obj = OCS(**machineset_yaml)
                ms_obj.create()
                if check_machineset_exists(f"{cls_id}-{role}-{aws_zone}"):
                    logging.info(
                        f"Machineset {cls_id}-{role}-{aws_zone} created")
                    return f"{cls_id}-{role}-{aws_zone}"
                else:
                    raise ResourceNotFoundError(
                        "Machineset resource not found")
    else:
        raise UnsupportedPlatformError("Functionality not supported in UPI")
Example #4
def add_capacity(osd_size_capacity_requested):
    """
    Add storage capacity to the cluster

    Args:
        osd_size_capacity_requested (int): Requested osd size capacity

    Returns:
        int: New storage device set count after patching the StorageCluster

    Note:
    "storageDeviceSets->count" represents a set of 3 OSDs.
    That is, if there are 3 OSDs in the system then count will be 1.
    If there are 6 OSDs then count is 2, and so on.
    By changing this value, we can add extra devices to the cluster.
    For example, to expand a cluster that already has 3 OSDs by 3 more OSDs,
    we can set count to 2. So, with each increase of count by 1,
    we get 3 extra OSDs added to the cluster.
    This is how we 'add capacity' via automation.
    OCS uses 3-way replication, i.e. the same data is placed on 3 OSDs.
    Because of this, the total usable capacity for apps from 3 OSDs
    will be the size of one OSD (all OSDs are of the same size).
    If we want to add more capacity to the cluster, we need to add 3 OSDs of the
    same size as the original OSDs. add_capacity needs to accept the 'capacity_to_add' as an argument.
    From this we need to arrive at storageDeviceSets->count and then
    "patch" this count to get the required capacity added.
    To do so, we use the following formula:
    storageDeviceSets->count = (capacity requested / osd capacity) + existing storageDeviceSets count

    """
    osd_size_existing = get_osd_size()
    device_sets_required = int(osd_size_capacity_requested / osd_size_existing)
    old_storage_devices_sets_count = get_deviceset_count()
    new_storage_devices_sets_count = int(device_sets_required + old_storage_devices_sets_count)
    lvpresent = localstorage.check_local_volume()
    if lvpresent:
        ocp_obj = OCP(kind='localvolume', namespace=constants.LOCAL_STORAGE_NAMESPACE)
        localvolume_data = ocp_obj.get(resource_name='local-block')
        device_list = localvolume_data['spec']['storageClassDevices'][0]['devicePaths']
        final_device_list = localstorage.get_new_device_paths(device_sets_required, osd_size_capacity_requested)
        device_list.sort()
        final_device_list.sort()
        if device_list == final_device_list:
            raise ResourceNotFoundError("No Extra device found")
        param = f"""[{{ "op": "replace", "path": "/spec/storageClassDevices/0/devicePaths",
                                                 "value": {final_device_list}}}]"""
        log.info(f"Final device list : {final_device_list}")
        lvcr = localstorage.get_local_volume_cr()
        log.info("Patching Local Volume CR...")
        lvcr.patch(
            resource_name=lvcr.get()['items'][0]['metadata']['name'],
            params=param.strip('\n'),
            format_type='json'
        )
        localstorage.check_pvs_created(int(len(final_device_list) / new_storage_devices_sets_count))
    sc = get_storage_cluster()
    # adding the storage capacity to the cluster
    params = f"""[{{ "op": "replace", "path": "/spec/storageDeviceSets/0/count",
                "value": {new_storage_devices_sets_count}}}]"""
    sc.patch(
        resource_name=sc.get()['items'][0]['metadata']['name'],
        params=params.strip('\n'),
        format_type='json'
    )
    return new_storage_devices_sets_count
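
A worked instance of the count formula from the docstring; all numbers are illustrative, not taken from a real cluster:

# Illustrative arithmetic only: 2048 GiB OSDs, one existing device set (3 OSDs),
# and a request for 4096 GiB of additional capacity.
osd_size_existing = 2048                  # GiB per OSD
osd_size_capacity_requested = 4096        # GiB requested
old_storage_devices_sets_count = 1        # 3 OSDs already in the cluster
device_sets_required = osd_size_capacity_requested // osd_size_existing      # 2
new_storage_devices_sets_count = device_sets_required + old_storage_devices_sets_count  # 3 -> 9 OSDs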
Example #5
def add_capacity(osd_size_capacity_requested,
                 add_extra_disk_to_existing_worker=True):
    """
    Add storage capacity to the cluster

    Args:
        osd_size_capacity_requested (int): Requested osd size capacity
        add_extra_disk_to_existing_worker (bool): Add a disk to an existing worker if True

    Returns:
        int: New storage device set count after patching the StorageCluster

    Note:
    "storageDeviceSets->count" represents a set of 3 OSDs.
    That is, if there are 3 OSDs in the system then count will be 1.
    If there are 6 OSDs then count is 2, and so on.
    By changing this value, we can add extra devices to the cluster.
    For example, to expand a cluster that already has 3 OSDs by 3 more OSDs,
    we can set count to 2. So, with each increase of count by 1,
    we get 3 extra OSDs added to the cluster.
    This is how we 'add capacity' via automation.
    OCS uses 3-way replication, i.e. the same data is placed on 3 OSDs.
    Because of this, the total usable capacity for apps from 3 OSDs
    will be the size of one OSD (all OSDs are of the same size).
    If we want to add more capacity to the cluster, we need to add 3 OSDs of the
    same size as the original OSDs. add_capacity needs to accept the 'capacity_to_add' as an argument.
    From this we need to arrive at storageDeviceSets->count and then
    "patch" this count to get the required capacity added.
    To do so, we use the following formula:
    storageDeviceSets->count = (capacity requested / osd capacity) + existing storageDeviceSets count

    """
    lvpresent = None
    lv_set_present = None
    osd_size_existing = get_osd_size()
    device_sets_required = int(osd_size_capacity_requested / osd_size_existing)
    old_storage_devices_sets_count = get_deviceset_count()
    new_storage_devices_sets_count = int(device_sets_required +
                                         old_storage_devices_sets_count)
    is_lso = config.DEPLOYMENT.get("local_storage")
    if is_lso:
        lv_lvs_data = localstorage.check_local_volume_local_volume_set()
        if lv_lvs_data.get("localvolume"):
            lvpresent = True
        elif lv_lvs_data.get("localvolumeset"):
            lv_set_present = True
        else:
            log.info(lv_lvs_data)
            raise ResourceNotFoundError(
                "No LocalVolume or LocalVolumeSet found")
    platform = config.ENV_DATA.get("platform", "").lower()
    if lvpresent:
        ocp_obj = OCP(kind="localvolume",
                      namespace=config.ENV_DATA["local_storage_namespace"])
        localvolume_data = ocp_obj.get(resource_name="local-block")
        device_list = localvolume_data["spec"]["storageClassDevices"][0][
            "devicePaths"]
        final_device_list = localstorage.get_new_device_paths(
            device_sets_required, osd_size_capacity_requested)
        device_list.sort()
        final_device_list.sort()
        if device_list == final_device_list:
            raise ResourceNotFoundError("No Extra device found")
        param = f"""[{{ "op": "replace", "path": "/spec/storageClassDevices/0/devicePaths",
                                                 "value": {final_device_list}}}]"""
        log.info(f"Final device list : {final_device_list}")
        lvcr = localstorage.get_local_volume_cr()
        log.info("Patching Local Volume CR...")
        lvcr.patch(
            resource_name=lvcr.get()["items"][0]["metadata"]["name"],
            params=param.strip("\n"),
            format_type="json",
        )
        localstorage.check_pvs_created(
            int(len(final_device_list) / new_storage_devices_sets_count))
    if lv_set_present:
        if check_pvs_present_for_ocs_expansion():
            log.info("Found Extra PV")
        else:
            if (platform == constants.VSPHERE_PLATFORM
                    and add_extra_disk_to_existing_worker):
                log.info("No Extra PV found")
                log.info("Adding Extra Disk to existing VSphere Worker node")
                add_new_disk_for_vsphere(sc_name=constants.LOCALSTORAGE_SC)
            else:
                raise PVNotSufficientException(
                    f"No Extra PV found in {constants.OPERATOR_NODE_LABEL}")
    sc = get_storage_cluster()
    # adding the storage capacity to the cluster
    params = f"""[{{ "op": "replace", "path": "/spec/storageDeviceSets/0/count",
                "value": {new_storage_devices_sets_count}}}]"""
    sc.patch(
        resource_name=sc.get()["items"][0]["metadata"]["name"],
        params=params.strip("\n"),
        format_type="json",
    )
    return new_storage_devices_sets_count
Example #6
    def test_rbd_pv_encryption_vaulttenantsa(
        self,
        project_factory,
        storageclass_factory,
        multi_pvc_factory,
        pod_factory,
        kv_version,
    ):
        """
        Test to verify creation and deletion of encrypted RBD PVC using vaulttenantsa method

        """
        # Create a project
        proj_obj = project_factory()

        # Create an encryption enabled storageclass for RBD
        sc_obj = storageclass_factory(
            interface=constants.CEPHBLOCKPOOL,
            encrypted=True,
            encryption_kms_id=self.kms.kmsid,
        )

        # Create serviceaccount in the tenant namespace
        self.kms.create_tenant_sa(namespace=proj_obj.namespace)

        # Create role in Vault
        self.kms.create_vault_kube_auth_role(namespace=proj_obj.namespace)

        # Create RBD PVCs with volume mode Block
        pvc_size = 5
        pvc_objs = multi_pvc_factory(
            interface=constants.CEPHBLOCKPOOL,
            project=proj_obj,
            storageclass=sc_obj,
            size=pvc_size,
            access_modes=[
                f"{constants.ACCESS_MODE_RWX}-Block",
                f"{constants.ACCESS_MODE_RWO}-Block",
            ],
            status=constants.STATUS_BOUND,
            num_of_pvc=3,
            wait_each=False,
        )

        # Create pods
        pod_objs = create_pods(
            pvc_objs,
            pod_factory,
            constants.CEPHBLOCKPOOL,
            pods_for_rwx=1,
            status=constants.STATUS_RUNNING,
        )

        # Verify if the key is created in Vault
        vol_handles = []
        for pvc_obj in pvc_objs:
            pv_obj = pvc_obj.backed_pv_obj
            vol_handle = pv_obj.get().get("spec").get("csi").get(
                "volumeHandle")
            vol_handles.append(vol_handle)

            # Check if the encryption key for this PVC is created in Vault
            if kms.is_key_present_in_path(key=vol_handle,
                                          path=self.kms.vault_backend_path):
                log.info(f"Vault: Found key for {pvc_obj.name}")
            else:
                raise ResourceNotFoundError(
                    f"Vault: Key not found for {pvc_obj.name}")

        # Verify whether encrypted device is present inside the pod and run IO
        for vol_handle, pod_obj in zip(vol_handles, pod_objs):
            if pod_obj.exec_sh_cmd_on_pod(
                    command=f"lsblk | grep {vol_handle} | grep crypt"):
                log.info(f"Encrypted device found in {pod_obj.name}")
            else:
                raise ResourceNotFoundError(
                    f"Encrypted device not found in {pod_obj.name}")

            pod_obj.run_io(
                storage_type="block",
                size=f"{pvc_size - 1}G",
                io_direction="write",
                runtime=60,
            )
        log.info("IO started on all pods")

        # Wait for IO completion
        for pod_obj in pod_objs:
            pod_obj.get_fio_results()
        log.info("IO completed on all pods")

        # Delete the pod
        for pod_obj in pod_objs:
            pod_obj.delete()
            pod_obj.ocp.wait_for_delete(resource_name=pod_obj.name)

        # Delete the PVC
        for pvc_obj in pvc_objs:
            pv_obj = pvc_obj.backed_pv_obj
            pvc_obj.delete()
            pv_obj.ocp.wait_for_delete(resource_name=pv_obj.name)

        # Verify whether the key is deleted in Vault
        for vol_handle in vol_handles:
            if not kms.is_key_present_in_path(
                    key=vol_handle, path=self.kms.vault_backend_path):
                log.info(f"Vault: Key deleted for {vol_handle}")
            else:
                raise KMSResourceCleaneupError(
                    f"Vault: Key deletion failed for {vol_handle}")
Example #7
def create_custom_machineset(
    role="app",
    instance_type=None,
    labels=None,
    taints=None,
    zone="a",
):
    """
    Function to create a custom machineset. Works for AWS, Azure, RHV and vSphere IPI deployments,
    i.e. using this, a user can create nodes with a different instance type and role.
    https://docs.openshift.com/container-platform/4.1/machine_management/creating-machineset.html

    Args:
        role (str): Role type to be added for the node, e.g. app, worker
        instance_type (str): Type of instance
        labels (list): List of Labels (key, val) to be added to the node
        taints (list): List of taints to be applied
        zone (str): Machineset zone for node creation.

    Returns:
        machineset (str): Created machineset name

    Raises:
        ResourceNotFoundError: In case machineset creation failed
        UnsupportedPlatformError: In case of wrong platform

    """
    # check for aws and IPI platform
    if config.ENV_DATA["platform"].lower() == "aws":
        machinesets_obj = OCP(
            kind=constants.MACHINESETS,
            namespace=constants.OPENSHIFT_MACHINE_API_NAMESPACE,
        )
        m4_xlarge = "m4.xlarge"
        aws_instance = instance_type if instance_type else m4_xlarge
        for machine in machinesets_obj.get()["items"]:
            # Get inputs from existing machineset config.
            region = (machine.get("spec").get("template").get("spec").get(
                "providerSpec").get("value").get("placement").get("region"))
            aws_zone = (machine.get("spec").get("template").get("spec").get(
                "providerSpec").get("value").get("placement").get(
                    "availabilityZone"))
            cls_id = (machine.get("spec").get("selector").get(
                "matchLabels").get("machine.openshift.io/cluster-api-cluster"))
            ami_id = (machine.get("spec").get("template").get("spec").get(
                "providerSpec").get("value").get("ami").get("id"))
            if aws_zone == f"{region}{zone}":
                machineset_yaml = templating.load_yaml(
                    constants.MACHINESET_YAML)

                # Update machineset_yaml with required values.
                machineset_yaml["metadata"]["labels"][
                    "machine.openshift.io/cluster-api-cluster"] = cls_id
                machineset_yaml["metadata"][
                    "name"] = f"{cls_id}-{role}-{aws_zone}"
                machineset_yaml["spec"]["selector"]["matchLabels"][
                    "machine.openshift.io/cluster-api-cluster"] = cls_id
                machineset_yaml["spec"]["selector"]["matchLabels"][
                    "machine.openshift.io/cluster-api-machineset"] = f"{cls_id}-{role}-{aws_zone}"
                machineset_yaml["spec"]["template"]["metadata"]["labels"][
                    "machine.openshift.io/cluster-api-cluster"] = cls_id
                machineset_yaml["spec"]["template"]["metadata"]["labels"][
                    "machine.openshift.io/cluster-api-machine-role"] = role
                machineset_yaml["spec"]["template"]["metadata"]["labels"][
                    "machine.openshift.io/cluster-api-machine-type"] = role
                machineset_yaml["spec"]["template"]["metadata"]["labels"][
                    "machine.openshift.io/cluster-api-machineset"] = f"{cls_id}-{role}-{aws_zone}"
                machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                    "value"]["ami"]["id"] = ami_id
                machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                    "value"]["iamInstanceProfile"][
                        "id"] = f"{cls_id}-worker-profile"
                machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                    "value"]["instanceType"] = aws_instance
                machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                    "value"]["placement"]["availabilityZone"] = aws_zone
                machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                    "value"]["placement"]["region"] = region
                machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                    "value"]["securityGroups"][0]["filters"][0]["values"][
                        0] = f"{cls_id}-worker-sg"
                machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                    "value"]["subnet"]["filters"][0]["values"][
                        0] = f"{cls_id}-private-{aws_zone}"
                machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                    "value"]["tags"][0][
                        "name"] = f"kubernetes.io/cluster/{cls_id}"

                # Apply the labels
                if labels:
                    for label in labels:
                        machineset_yaml["spec"]["template"]["spec"][
                            "metadata"]["labels"][label[0]] = label[1]
                    # Remove app label in case of infra nodes
                    if role == "infra":
                        machineset_yaml["spec"]["template"]["spec"][
                            "metadata"]["labels"].pop(constants.APP_LABEL,
                                                      None)

                # Apply the Taints
                # ex taint list looks like:
                # [ {'effect': 'NoSchedule',
                #    'key': 'node.ocs.openshift.io/storage',
                #    'value': 'true',
                #  }, {'effect': 'Schedule', 'key': 'xyz', 'value': 'False'} ]
                if taints:
                    machineset_yaml["spec"]["template"]["spec"].update(
                        {"taints": taints})

                # Create new custom machineset
                ms_obj = OCS(**machineset_yaml)
                ms_obj.create()
                if check_machineset_exists(f"{cls_id}-{role}-{aws_zone}"):
                    log.info(f"Machineset {cls_id}-{role}-{aws_zone} created")
                    return f"{cls_id}-{role}-{aws_zone}"
                else:
                    raise ResourceNotFoundError(
                        "Machineset resource not found")

    # check for azure and IPI platform
    elif config.ENV_DATA["platform"] == "azure":
        machinesets_obj = OCP(
            kind=constants.MACHINESETS,
            namespace=constants.OPENSHIFT_MACHINE_API_NAMESPACE,
        )
        vmsize = constants.AZURE_PRODUCTION_INSTANCE_TYPE
        azure_instance = instance_type if instance_type else vmsize
        for machine in machinesets_obj.get()["items"]:
            # Get inputs from existing machineset config.
            region = (machine.get("spec").get("template").get("spec").get(
                "providerSpec").get("value").get("location"))
            azure_zone = (machine.get("spec").get("template").get("spec").get(
                "providerSpec").get("value").get("zone"))
            cls_id = (machine.get("spec").get("selector").get(
                "matchLabels").get("machine.openshift.io/cluster-api-cluster"))
            if azure_zone == zone:
                az_zone = f"{region}{zone}"
                machineset_yaml = templating.load_yaml(
                    constants.MACHINESET_YAML_AZURE)

                # Update machineset_yaml with required values.
                machineset_yaml["metadata"]["labels"][
                    "machine.openshift.io/cluster-api-cluster"] = cls_id
                machineset_yaml["metadata"][
                    "name"] = f"{cls_id}-{role}-{az_zone}"
                machineset_yaml["spec"]["selector"]["matchLabels"][
                    "machine.openshift.io/cluster-api-cluster"] = cls_id
                machineset_yaml["spec"]["selector"]["matchLabels"][
                    "machine.openshift.io/cluster-api-machineset"] = f"{cls_id}-{role}-{az_zone}"
                machineset_yaml["spec"]["template"]["metadata"]["labels"][
                    "machine.openshift.io/cluster-api-cluster"] = cls_id
                machineset_yaml["spec"]["template"]["metadata"]["labels"][
                    "machine.openshift.io/cluster-api-machine-role"] = role
                machineset_yaml["spec"]["template"]["metadata"]["labels"][
                    "machine.openshift.io/cluster-api-machine-type"] = role
                machineset_yaml["spec"]["template"]["metadata"]["labels"][
                    "machine.openshift.io/cluster-api-machineset"] = f"{cls_id}-{role}-{az_zone}"
                machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                    "value"]["image"][
                        "resourceID"] = f"/resourceGroups/{cls_id}-rg/providers/Microsoft.Compute/images/{cls_id}"
                machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                    "value"]["location"] = region
                machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                    "value"]["managedIdentity"] = f"{cls_id}-identity"
                machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                    "value"]["resourceGroup"] = f"{cls_id}-rg"
                machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                    "value"]["subnet"] = f"{cls_id}-worker-subnet"
                machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                    "value"]["vmSize"] = azure_instance
                machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                    "value"]["vnet"] = f"{cls_id}-vnet"
                machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                    "value"]["zone"] = zone

                # Apply the labels
                if labels:
                    for label in labels:
                        machineset_yaml["spec"]["template"]["spec"][
                            "metadata"]["labels"][label[0]] = label[1]
                    # Remove app label in case of infra nodes
                    if role == "infra":
                        machineset_yaml["spec"]["template"]["spec"][
                            "metadata"]["labels"].pop(constants.APP_LABEL,
                                                      None)

                if taints:
                    machineset_yaml["spec"]["template"]["spec"].update(
                        {"taints": taints})

                # Create new custom machineset
                ms_obj = OCS(**machineset_yaml)
                ms_obj.create()
                if check_machineset_exists(f"{cls_id}-{role}-{az_zone}"):
                    log.info(f"Machineset {cls_id}-{role}-{az_zone} created")
                    return f"{cls_id}-{role}-{az_zone}"
                else:
                    raise ResourceNotFoundError(
                        "Machineset resource not found")

    # check for RHV and IPI platform
    elif config.ENV_DATA["platform"] == "rhv":
        machinesets_obj = OCP(
            kind=constants.MACHINESETS,
            namespace=constants.OPENSHIFT_MACHINE_API_NAMESPACE,
        )
        for machine in machinesets_obj.get()["items"]:
            # Get inputs from existing machineset config.
            cls_uuid = (machine.get("spec").get("template").get("spec").get(
                "providerSpec").get("value").get("cluster_id"))
            template_name = (machine.get("spec").get("template").get(
                "spec").get("providerSpec").get("value").get("template_name"))
            cls_id = (machine.get("spec").get("selector").get(
                "matchLabels").get("machine.openshift.io/cluster-api-cluster"))
            socket = (machine.get("spec").get("template").get("spec").get(
                "providerSpec").get("value").get("cpu").get("sockets"))

            machineset_yaml = templating.load_yaml(
                constants.MACHINESET_YAML_RHV)

            # Update machineset_yaml with required values.
            machineset_yaml["metadata"]["labels"][
                "machine.openshift.io/cluster-api-cluster"] = cls_id
            machineset_yaml["metadata"]["name"] = f"{cls_id}-{role}-{zone}"
            machineset_yaml["spec"]["selector"]["matchLabels"][
                "machine.openshift.io/cluster-api-cluster"] = cls_id
            machineset_yaml["spec"]["selector"]["matchLabels"][
                "machine.openshift.io/cluster-api-machineset"] = f"{cls_id}-{role}-{zone}"
            machineset_yaml["spec"]["template"]["metadata"]["labels"][
                "machine.openshift.io/cluster-api-cluster"] = cls_id
            machineset_yaml["spec"]["template"]["metadata"]["labels"][
                "machine.openshift.io/cluster-api-machine-role"] = role
            machineset_yaml["spec"]["template"]["metadata"]["labels"][
                "machine.openshift.io/cluster-api-machine-type"] = role
            machineset_yaml["spec"]["template"]["metadata"]["labels"][
                "machine.openshift.io/cluster-api-machineset"] = f"{cls_id}-{role}-{zone}"
            machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                "value"]["cluster_id"] = cls_uuid
            machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                "value"]["template_name"] = template_name
            machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                "value"]["cpu"]["sockets"] = socket

            # Apply the labels
            if labels:
                for label in labels:
                    machineset_yaml["spec"]["template"]["spec"]["metadata"][
                        "labels"][label[0]] = label[1]
                # Remove app label in case of infra nodes
                if role == "infra":
                    machineset_yaml["spec"]["template"]["spec"]["metadata"][
                        "labels"].pop(constants.APP_LABEL, None)

            if taints:
                machineset_yaml["spec"]["template"]["spec"].update(
                    {"taints": taints})

            # Create new custom machineset
            ms_obj = OCS(**machineset_yaml)
            ms_obj.create()
            if check_machineset_exists(f"{cls_id}-{role}-{zone}"):
                log.info(f"Machineset {cls_id}-{role}-{zone} created")
                return f"{cls_id}-{role}-{zone}"
            else:
                raise ResourceNotFoundError("Machineset resource not found")

    # check for vmware and IPI platform
    elif config.ENV_DATA["platform"] == constants.VSPHERE_PLATFORM:
        machinesets_obj = OCP(
            kind=constants.MACHINESETS,
            namespace=constants.OPENSHIFT_MACHINE_API_NAMESPACE,
        )
        for machine in machinesets_obj.get()["items"]:
            # Get inputs from existing machineset config.
            cls_id = machine.get("spec")["selector"]["matchLabels"][
                "machine.openshift.io/cluster-api-cluster"]
            disk_size = machine.get(
                "spec")["template"]["spec"]["providerSpec"]["value"]["diskGiB"]
            memory = machine.get("spec")["template"]["spec"]["providerSpec"][
                "value"]["memoryMiB"]
            network_name = machine.get(
                "spec")["template"]["spec"]["providerSpec"]["value"][
                    "network"]["devices"][0]["networkName"]
            num_cpu = machine.get(
                "spec")["template"]["spec"]["providerSpec"]["value"]["numCPUs"]
            num_core = machine.get("spec")["template"]["spec"]["providerSpec"][
                "value"]["numCoresPerSocket"]
            vm_template = machine.get("spec")["template"]["spec"][
                "providerSpec"]["value"]["template"]
            datacenter = machine.get("spec")["template"]["spec"][
                "providerSpec"]["value"]["workspace"]["datacenter"]
            datastore = machine.get("spec")["template"]["spec"][
                "providerSpec"]["value"]["workspace"]["datastore"]
            ds_folder = machine.get("spec")["template"]["spec"][
                "providerSpec"]["value"]["workspace"]["folder"]
            ds_resourcepool = machine.get("spec")["template"]["spec"][
                "providerSpec"]["value"]["workspace"]["resourcePool"]
            ds_server = machine.get("spec")["template"]["spec"][
                "providerSpec"]["value"]["workspace"]["server"]

            machineset_yaml = templating.load_yaml(
                constants.MACHINESET_YAML_VMWARE)

            # Update machineset_yaml with required values.
            machineset_yaml["metadata"]["labels"][
                "machine.openshift.io/cluster-api-cluster"] = cls_id
            machineset_yaml["metadata"]["name"] = f"{cls_id}-{role}"
            machineset_yaml["spec"]["selector"]["matchLabels"][
                "machine.openshift.io/cluster-api-cluster"] = cls_id
            machineset_yaml["spec"]["selector"]["matchLabels"][
                "machine.openshift.io/cluster-api-machineset"] = f"{cls_id}-{role}"
            machineset_yaml["spec"]["template"]["metadata"]["labels"][
                "machine.openshift.io/cluster-api-cluster"] = cls_id
            machineset_yaml["spec"]["template"]["metadata"]["labels"][
                "machine.openshift.io/cluster-api-machine-role"] = role
            machineset_yaml["spec"]["template"]["metadata"]["labels"][
                "machine.openshift.io/cluster-api-machine-type"] = role
            machineset_yaml["spec"]["template"]["metadata"]["labels"][
                "machine.openshift.io/cluster-api-machineset"] = f"{cls_id}-{role}"
            machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                "value"]["diskGiB"] = disk_size
            machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                "value"]["memoryMiB"] = memory
            machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                "value"]["network"]["devices"][0]["networkName"] = network_name
            machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                "value"]["numCPUs"] = num_cpu
            machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                "value"]["numCoresPerSocket"] = num_core
            machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                "value"]["template"] = vm_template
            machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                "value"]["workspace"]["datacenter"] = datacenter
            machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                "value"]["workspace"]["datastore"] = datastore
            machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                "value"]["workspace"]["folder"] = ds_folder
            machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                "value"]["workspace"]["resourcepool"] = ds_resourcepool
            machineset_yaml["spec"]["template"]["spec"]["providerSpec"][
                "value"]["workspace"]["server"] = ds_server

            # Apply the labels
            if labels:
                for label in labels:
                    machineset_yaml["spec"]["template"]["spec"]["metadata"][
                        "labels"][label[0]] = label[1]
                # Remove app label in case of infra nodes
                if role == "infra":
                    machineset_yaml["spec"]["template"]["spec"]["metadata"][
                        "labels"].pop(constants.APP_LABEL, None)

            if taints:
                machineset_yaml["spec"]["template"]["spec"].update(
                    {"taints": taints})

            # Create new custom machineset
            ms_obj = OCS(**machineset_yaml)
            ms_obj.create()
            if check_machineset_exists(f"{cls_id}-{role}"):
                log.info(f"Machineset {cls_id}-{role} created")
                return f"{cls_id}-{role}"
            else:
                raise ResourceNotFoundError("Machineset resource not found")

    else:
        raise UnsupportedPlatformError(
            "Functionality not supported in this platform")
Example #8
    def test_pvc_to_pvc_clone(self, kv_version, kms_provider, pod_factory):
        """
        Test to create a clone from an existing encrypted RBD PVC.
        Verify that the cloned PVC is encrypted and all the data is preserved.

        """

        log.info("Checking for encrypted device and running IO on all pods")
        for vol_handle, pod_obj in zip(self.vol_handles, self.pod_objs):
            if pod_obj.exec_sh_cmd_on_pod(
                    command=f"lsblk | grep {vol_handle} | grep crypt"):
                log.info(f"Encrypted device found in {pod_obj.name}")
            else:
                raise ResourceNotFoundError(
                    f"Encrypted device not found in {pod_obj.name}")
            log.info(f"File created during IO {pod_obj.name}")
            pod_obj.run_io(
                storage_type="block",
                size="500M",
                io_direction="write",
                runtime=60,
                end_fsync=1,
                direct=1,
            )
        log.info("IO started on all pods")

        # Wait for IO completion
        for pod_obj in self.pod_objs:
            pod_obj.get_fio_results()
        log.info("IO completed on all pods")

        cloned_pvc_objs, cloned_vol_handles = ([] for i in range(2))

        # Calculate the md5sum value and create clones of existing PVCs
        log.info("Calculate the md5sum after IO and create clone of all PVCs")
        for pod_obj in self.pod_objs:
            pod_obj.md5sum_after_io = pod.cal_md5sum(
                pod_obj=pod_obj,
                file_name=pod_obj.get_storage_path(storage_type="block"),
                block=True,
            )

            cloned_pvc_obj = pvc.create_pvc_clone(
                self.sc_obj.name,
                pod_obj.pvc.name,
                constants.CSI_RBD_PVC_CLONE_YAML,
                self.proj_obj.namespace,
                volume_mode=constants.VOLUME_MODE_BLOCK,
                access_mode=pod_obj.pvc.access_mode,
            )
            helpers.wait_for_resource_state(cloned_pvc_obj,
                                            constants.STATUS_BOUND)
            cloned_pvc_obj.reload()
            cloned_pvc_obj.md5sum = pod_obj.md5sum_after_io
            cloned_pvc_objs.append(cloned_pvc_obj)
        log.info("Clone of all PVCs created")

        # Create and attach pod to the pvc
        cloned_pod_objs = helpers.create_pods(
            cloned_pvc_objs,
            pod_factory,
            constants.CEPHBLOCKPOOL,
            pods_for_rwx=1,
            status="",
        )

        # Verify the new pods are running
        log.info("Verify the new pods are running")
        for pod_obj in cloned_pod_objs:
            helpers.wait_for_resource_state(pod_obj, constants.STATUS_RUNNING)
            pod_obj.reload()
        log.info("Verified: New pods are running")

        # Verify encryption keys are created for cloned PVCs in Vault
        for pvc_obj in cloned_pvc_objs:
            pv_obj = pvc_obj.backed_pv_obj
            vol_handle = pv_obj.get().get("spec").get("csi").get(
                "volumeHandle")
            cloned_vol_handles.append(vol_handle)

            if kms_provider == constants.VAULT_KMS_PROVIDER:
                if kms.is_key_present_in_path(
                        key=vol_handle, path=self.kms.vault_backend_path):
                    log.info(
                        f"Vault: Found key for restore PVC {pvc_obj.name}")
                else:
                    raise ResourceNotFoundError(
                        f"Vault: Key not found for restored PVC {pvc_obj.name}"
                    )
        # Verify encrypted device is present and md5sum on all pods
        for vol_handle, pod_obj in zip(cloned_vol_handles, cloned_pod_objs):
            if pod_obj.exec_sh_cmd_on_pod(
                    command=f"lsblk | grep {vol_handle} | grep crypt"):
                log.info(f"Encrypted device found in {pod_obj.name}")
            else:
                raise ResourceNotFoundError(
                    f"Encrypted device not found in {pod_obj.name}")

            log.info(f"Verifying md5sum on pod {pod_obj.name}")
            pod.verify_data_integrity(
                pod_obj=pod_obj,
                file_name=pod_obj.get_storage_path(storage_type="block"),
                original_md5sum=pod_obj.pvc.md5sum,
                block=True,
            )
            log.info(f"Verified md5sum on pod {pod_obj.name}")

        # Run IO on new pods
        log.info("Starting IO on new pods")
        for pod_obj in cloned_pod_objs:
            pod_obj.run_io(storage_type="block", size="100M", runtime=10)

        # Wait for IO completion on new pods
        log.info("Waiting for IO completion on new pods")
        for pod_obj in cloned_pod_objs:
            pod_obj.get_fio_results()
        log.info("IO completed on new pods.")

        # Delete the restored pods, PVC and snapshots
        log.info("Deleting all pods")
        for pod_obj in cloned_pod_objs + self.pod_objs:
            pod_obj.delete()
            pod_obj.ocp.wait_for_delete(resource_name=pod_obj.name)

        log.info("Deleting all PVCs")
        for pvc_obj in cloned_pvc_objs + self.pvc_objs:
            pv_obj = pvc_obj.backed_pv_obj
            pvc_obj.delete()
            pv_obj.ocp.wait_for_delete(resource_name=pv_obj.name)

        if kms_provider == constants.VAULT_KMS_PROVIDER:
            # Verify if the keys for parent and cloned PVCs are deleted from Vault
            if kv_version == "v1" or Version.coerce(
                    config.ENV_DATA["ocs_version"]) >= Version.coerce("4.9"):
                log.info(
                    "Verify whether the keys for cloned PVCs are deleted from vault"
                )
                for key in cloned_vol_handles + self.vol_handles:
                    if not kms.is_key_present_in_path(
                            key=key, path=self.kms.vault_backend_path):
                        log.info(f"Vault: Key deleted for {key}")
                    else:
                        raise KMSResourceCleaneupError(
                            f"Vault: Key deletion failed for {key}")
                log.info("All keys from vault were deleted")
Example #9
    def setup(
        self,
        kv_version,
        kms_provider,
        use_vault_namespace,
        pv_encryption_kms_setup_factory,
        project_factory,
        multi_pvc_factory,
        pod_factory,
        storageclass_factory,
    ):
        """
        Setup csi-kms-connection-details configmap and create resources for the test

        """

        log.info("Setting up csi-kms-connection-details configmap")
        self.kms = pv_encryption_kms_setup_factory(kv_version,
                                                   use_vault_namespace)
        log.info("csi-kms-connection-details setup successful")

        # Create a project
        self.proj_obj = project_factory()

        # Create an encryption enabled storageclass for RBD
        self.sc_obj = storageclass_factory(
            interface=constants.CEPHBLOCKPOOL,
            encrypted=True,
            encryption_kms_id=self.kms.kmsid,
        )

        if kms_provider == constants.VAULT_KMS_PROVIDER:
            # Create ceph-csi-kms-token in the tenant namespace
            self.kms.vault_path_token = self.kms.generate_vault_token()
            self.kms.create_vault_csi_kms_token(
                namespace=self.proj_obj.namespace)

        # Create PVC and Pods
        self.pvc_size = 1
        self.pvc_objs = multi_pvc_factory(
            interface=constants.CEPHBLOCKPOOL,
            project=self.proj_obj,
            storageclass=self.sc_obj,
            size=self.pvc_size,
            access_modes=[
                f"{constants.ACCESS_MODE_RWX}-Block",
                f"{constants.ACCESS_MODE_RWO}-Block",
            ],
            status=constants.STATUS_BOUND,
            num_of_pvc=2,
            wait_each=False,
        )

        self.pod_objs = helpers.create_pods(
            self.pvc_objs,
            pod_factory,
            constants.CEPHBLOCKPOOL,
            pods_for_rwx=1,
            status=constants.STATUS_RUNNING,
        )

        # Verify if the key is created in Vault
        self.vol_handles = []
        for pvc_obj in self.pvc_objs:
            pv_obj = pvc_obj.backed_pv_obj
            vol_handle = pv_obj.get().get("spec").get("csi").get(
                "volumeHandle")
            self.vol_handles.append(vol_handle)

            if kms_provider == constants.VAULT_KMS_PROVIDER:
                if kms.is_key_present_in_path(
                        key=vol_handle, path=self.kms.vault_backend_path):
                    log.info(f"Vault: Found key for {pvc_obj.name}")
                else:
                    raise ResourceNotFoundError(
                        f"Vault: Key not found for {pvc_obj.name}")
Example #10
def create_custom_machineset(role='app',
                             instance_type='m4.xlarge',
                             label='app-scale',
                             zone='a'):
    """
    Function to create a custom machineset. Works only for AWS,
    i.e. using this, a user can create nodes with a different instance type and role.
    https://docs.openshift.com/container-platform/4.1/machine_management/creating-machineset.html

    Args:
        role (str): Role type to be added for the node, e.g. app, worker
        instance_type (str): Type of AWS instance
        label (str): Label to be added to the node
        zone (str): Machineset zone for node creation.

    Returns:
        machineset (str): Created machineset name

    Raises:
        ResourceNotFoundError: In case machineset creation failed
        UnsupportedPlatformError: In case of wrong platform

    """
    # check for platform, since it's supported only for IPI
    if config.ENV_DATA['deployment_type'] == 'ipi':
        machinesets_obj = OCP(
            kind=constants.MACHINESETS,
            namespace=constants.OPENSHIFT_MACHINE_API_NAMESPACE)
        for machine in machinesets_obj.get()['items']:
            # Get inputs from existing machineset config.
            region = machine.get('spec').get('template').get('spec').get(
                'providerSpec').get('value').get('placement').get('region')
            aws_zone = machine.get('spec').get('template').get('spec').get(
                'providerSpec').get('value').get('placement').get(
                    'availabilityZone')
            cls_id = machine.get('spec').get('selector').get(
                'matchLabels').get('machine.openshift.io/cluster-api-cluster')
            ami_id = machine.get('spec').get('template').get('spec').get(
                'providerSpec').get('value').get('ami').get('id')
            if aws_zone == f"{region}{zone}":
                machineset_yaml = templating.load_yaml(
                    constants.MACHINESET_YAML)

                # Update machineset_yaml with required values.
                machineset_yaml['metadata']['labels'][
                    'machine.openshift.io/cluster-api-cluster'] = cls_id
                machineset_yaml['metadata'][
                    'name'] = f"{cls_id}-{role}-{aws_zone}"
                machineset_yaml['spec']['selector']['matchLabels'][
                    'machine.openshift.io/cluster-api-cluster'] = cls_id
                machineset_yaml['spec']['selector']['matchLabels'][
                    'machine.openshift.io/cluster-api-machineset'] = f"{cls_id}-{role}-{aws_zone}"
                machineset_yaml['spec']['template']['metadata']['labels'][
                    'machine.openshift.io/cluster-api-cluster'] = cls_id
                machineset_yaml['spec']['template']['metadata']['labels'][
                    'machine.openshift.io/cluster-api-machine-role'] = role
                machineset_yaml['spec']['template']['metadata']['labels'][
                    'machine.openshift.io/cluster-api-machine-type'] = role
                machineset_yaml['spec']['template']['metadata']['labels'][
                    'machine.openshift.io/cluster-api-machineset'] = f"{cls_id}-{role}-{aws_zone}"
                machineset_yaml['spec']['template']['spec']['metadata'][
                    'labels'][f"node-role.kubernetes.io/{role}"] = f"{label}"
                machineset_yaml['spec']['template']['spec']['providerSpec'][
                    'value']['ami']['id'] = ami_id
                machineset_yaml['spec']['template']['spec']['providerSpec'][
                    'value']['iamInstanceProfile'][
                        'id'] = f"{cls_id}-worker-profile"
                machineset_yaml['spec']['template']['spec']['providerSpec'][
                    'value']['instanceType'] = instance_type
                machineset_yaml['spec']['template']['spec']['providerSpec'][
                    'value']['placement']['availabilityZone'] = aws_zone
                machineset_yaml['spec']['template']['spec']['providerSpec'][
                    'value']['placement']['region'] = region
                machineset_yaml['spec']['template']['spec']['providerSpec'][
                    'value']['securityGroups'][0]['filters'][0]['values'][
                        0] = f"{cls_id}-worker-sg"
                machineset_yaml['spec']['template']['spec']['providerSpec'][
                    'value']['subnet']['filters'][0]['values'][
                        0] = f"{cls_id}-private-{aws_zone}"
                machineset_yaml['spec']['template']['spec']['providerSpec'][
                    'value']['tags'][0][
                        'name'] = f"kubernetes.io/cluster/{cls_id}"

                # Create new custom machineset
                ms_obj = OCS(**machineset_yaml)
                ms_obj.create()
                if check_machineset_exists(f"{cls_id}-{role}-{aws_zone}"):
                    logging.info(
                        f"Machineset {cls_id}-{role}-{aws_zone} created")
                    return f"{cls_id}-{role}-{aws_zone}"
                else:
                    raise ResourceNotFoundError(
                        "Machineset resource not found")
    else:
        raise UnsupportedPlatformError("Functionality not supported in UPI")
Example #11
def add_capacity(osd_size_capacity_requested):
    """
    Add storage capacity to the cluster

    Args:
        osd_size_capacity_requested (int): Requested osd size capacity

    Returns:
        int: New storage device set count after patching the StorageCluster

    Note:
    "storageDeviceSets->count" represents a set of 3 OSDs.
    That is, if there are 3 OSDs in the system then count will be 1.
    If there are 6 OSDs then count is 2, and so on.
    By changing this value, we can add extra devices to the cluster.
    For example, to expand a cluster that already has 3 OSDs by 3 more OSDs,
    we can set count to 2. So, with each increase of count by 1,
    we get 3 extra OSDs added to the cluster.
    This is how we 'add capacity' via automation.
    OCS uses 3-way replication, i.e. the same data is placed on 3 OSDs.
    Because of this, the total usable capacity for apps from 3 OSDs
    will be the size of one OSD (all OSDs are of the same size).
    If we want to add more capacity to the cluster, we need to add 3 OSDs of the
    same size as the original OSDs. add_capacity needs to accept the 'capacity_to_add' as an argument.
    From this we need to arrive at storageDeviceSets->count and then
    "patch" this count to get the required capacity added.
    To do so, we use the following formula:
    storageDeviceSets->count = (capacity requested / osd capacity) + existing storageDeviceSets count

    """
    osd_size_existing = get_osd_size()
    device_sets_required = int(osd_size_capacity_requested / osd_size_existing)
    old_storage_devices_sets_count = get_deviceset_count()
    new_storage_devices_sets_count = int(device_sets_required +
                                         old_storage_devices_sets_count)
    lvpresent = localstorage.check_local_volume()
    ocp_version = get_ocp_version()
    platform = config.ENV_DATA.get("platform", "").lower()
    is_lso = config.DEPLOYMENT.get("local_storage")
    if (ocp_version == "4.7" and (platform == constants.AWS_PLATFORM
                                  or platform == constants.VSPHERE_PLATFORM)
            and (not is_lso)):
        logging.info("Add capacity via UI")
        setup_ui = login_ui()
        add_ui_obj = AddReplaceDeviceUI(setup_ui)
        add_ui_obj.add_capacity_ui()
        close_browser(setup_ui)
    else:
        if lvpresent:
            ocp_obj = OCP(kind="localvolume",
                          namespace=config.ENV_DATA["local_storage_namespace"])
            localvolume_data = ocp_obj.get(resource_name="local-block")
            device_list = localvolume_data["spec"]["storageClassDevices"][0][
                "devicePaths"]
            final_device_list = localstorage.get_new_device_paths(
                device_sets_required, osd_size_capacity_requested)
            device_list.sort()
            final_device_list.sort()
            if device_list == final_device_list:
                raise ResourceNotFoundError("No Extra device found")
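            # The JSON patch below replaces the whole devicePaths list at once;
            # rendered with hypothetical device paths it looks like:
            #   [{"op": "replace",
            #     "path": "/spec/storageClassDevices/0/devicePaths",
            #     "value": ["/dev/sdb", "/dev/sdc"]}]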
            param = f"""[{{ "op": "replace", "path": "/spec/storageClassDevices/0/devicePaths",
                                                     "value": {final_device_list}}}]"""
            log.info(f"Final device list : {final_device_list}")
            lvcr = localstorage.get_local_volume_cr()
            log.info("Patching Local Volume CR...")
            lvcr.patch(
                resource_name=lvcr.get()["items"][0]["metadata"]["name"],
                params=param.strip("\n"),
                format_type="json",
            )
            localstorage.check_pvs_created(
                int(len(final_device_list) / new_storage_devices_sets_count))
        sc = get_storage_cluster()
        # adding the storage capacity to the cluster
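        # The patch bumps storageDeviceSets[0].count; e.g. with a new count of 2
        # it renders as:
        #   [{"op": "replace", "path": "/spec/storageDeviceSets/0/count", "value": 2}]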
        params = f"""[{{ "op": "replace", "path": "/spec/storageDeviceSets/0/count",
                    "value": {new_storage_devices_sets_count}}}]"""
        sc.patch(
            resource_name=sc.get()["items"][0]["metadata"]["name"],
            params=params.strip("\n"),
            format_type="json",
        )
    return new_storage_devices_sets_count
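
A minimal usage sketch for add_capacity above; the requested size is illustrative and assumes the cluster already runs OSDs of that same size:

# Request one extra OSD's worth of capacity (2048 GiB, purely illustrative).
new_count = add_capacity(osd_size_capacity_requested=2048)
log.info(f"storageDeviceSets count is now {new_count}")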
Example no. 12
0
    def test_encrypted_rbd_block_pvc_snapshot(
        self,
        kms_provider,
        snapshot_factory,
        snapshot_restore_factory,
        pod_factory,
        kv_version,
    ):
        """
        Test to take snapshots of encrypted RBD Block VolumeMode PVCs

        """

        log.info(
            "Check for encrypted device, find initial md5sum value and run IO on all pods"
        )
        for vol_handle, pod_obj in zip(self.vol_handles, self.pod_objs):

            # Verify whether encrypted device is present inside the pod
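            # An encrypted RBD volume shows up in lsblk as a device-mapper entry
            # of TYPE "crypt"; matching the CSI volume handle as well ties that
            # crypt entry to this specific PVC.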
            if pod_obj.exec_sh_cmd_on_pod(
                    command=f"lsblk | grep {vol_handle} | grep crypt"):
                log.info(f"Encrypted device found in {pod_obj.name}")
            else:
                raise ResourceNotFoundError(
                    f"Encrypted device not found in {pod_obj.name}")

            # Find initial md5sum
            pod_obj.md5sum_before_io = cal_md5sum(
                pod_obj=pod_obj,
                file_name=pod_obj.get_storage_path(storage_type="block"),
                block=True,
            )
            pod_obj.run_io(
                storage_type="block",
                size=f"{self.pvc_size - 1}G",
                io_direction="write",
                runtime=60,
            )
        log.info("IO started on all pods")

        # Wait for IO completion
        for pod_obj in self.pod_objs:
            pod_obj.get_fio_results()
        log.info("IO completed on all pods")

        snap_objs, snap_handles = [], []

        # Verify md5sum has changed after IO. Create snapshot
        log.info(
            "Verify md5sum has changed after IO and create snapshot from all PVCs"
        )
        for pod_obj in self.pod_objs:
            md5sum_after_io = cal_md5sum(
                pod_obj=pod_obj,
                file_name=pod_obj.get_storage_path(storage_type="block"),
                block=True,
            )
            assert (pod_obj.md5sum_before_io != md5sum_after_io
                    ), f"md5sum has not changed after IO on pod {pod_obj.name}"
            log.info(f"Creating snapshot of PVC {pod_obj.pvc.name}")
            snap_obj = snapshot_factory(pod_obj.pvc, wait=False)
            snap_obj.md5sum = md5sum_after_io
            snap_objs.append(snap_obj)
        log.info("Snapshots created")

        # Verify snapshots are ready and that the encryption keys are created in Vault
        log.info("Verify snapshots are ready")
        for snap_obj in snap_objs:
            snap_obj.ocp.wait_for_resource(
                condition="true",
                resource_name=snap_obj.name,
                column=constants.STATUS_READYTOUSE,
                timeout=180,
            )
            snapshot_content = get_snapshot_content_obj(snap_obj=snap_obj)
            snap_handle = snapshot_content.get().get("status").get(
                "snapshotHandle")
            if kms_provider == constants.VAULT_KMS_PROVIDER:
                if kms.is_key_present_in_path(
                        key=snap_handle, path=self.kms.vault_backend_path):
                    log.info(f"Vault: Found key for snapshot {snap_obj.name}")
                else:
                    raise ResourceNotFoundError(
                        f"Vault: Key not found for snapshot {snap_obj.name}")
            snap_handles.append(snap_handle)

        # Delete pods
        log.info("Deleting the pods")
        for pod_obj in self.pod_objs:
            pod_obj.delete()
            pod_obj.ocp.wait_for_delete(resource_name=pod_obj.name)
        log.info("Deleted all the pods")

        # Delete parent PVCs to verify snapshot is independent
        log.info("Deleting parent PVCs")
        for pvc_obj in self.pvc_objs:
            pv_obj = pvc_obj.backed_pv_obj
            pvc_obj.delete()
            pvc_obj.ocp.wait_for_delete(resource_name=pvc_obj.name)
            log.info(f"Deleted PVC {pvc_obj.name}. Verifying whether PV "
                     f"{pv_obj.name} is deleted.")
            pv_obj.ocp.wait_for_delete(resource_name=pv_obj.name)
        log.info(
            "All parent PVCs and PVs are deleted before restoring snapshot.")

        restore_pvc_objs, restore_vol_handles = [], []

        # Create PVCs out of the snapshots
        log.info("Creating new PVCs from snapshots")
        for snap_obj in snap_objs:
            log.info(f"Creating a PVC from snapshot {snap_obj.name}")
            restore_pvc_obj = snapshot_restore_factory(
                snapshot_obj=snap_obj,
                storageclass=self.sc_obj.name,
                size=f"{self.pvc_size}Gi",
                volume_mode=snap_obj.parent_volume_mode,
                access_mode=snap_obj.parent_access_mode,
                status="",
            )
            log.info(f"Created PVC {restore_pvc_obj.name} from snapshot "
                     f"{snap_obj.name}")
            restore_pvc_obj.md5sum = snap_obj.md5sum
            restore_pvc_objs.append(restore_pvc_obj)
        log.info("Created new PVCs from all the snapshots")

        # Confirm that the restored PVCs are Bound
        log.info("Verify the restored PVCs are Bound")
        for pvc_obj in restore_pvc_objs:
            wait_for_resource_state(resource=pvc_obj,
                                    state=constants.STATUS_BOUND,
                                    timeout=180)
            pvc_obj.reload()
        log.info("Verified: Restored PVCs are Bound.")

        # Attach the restored PVCs to pods. Attach RWX PVC on two pods
        log.info("Attach the restored PVCs to pods")
        restore_pod_objs = create_pods(
            restore_pvc_objs,
            pod_factory,
            constants.CEPHBLOCKPOOL,
            pods_for_rwx=1,
            status="",
        )

        # Verify the new pods are running
        log.info("Verify the new pods are running")
        for pod_obj in restore_pod_objs:
            timeout = (300 if config.ENV_DATA["platform"]
                       == constants.IBMCLOUD_PLATFORM else 60)
            wait_for_resource_state(pod_obj, constants.STATUS_RUNNING, timeout)
        log.info("Verified: New pods are running")

        # Verify encryption keys are created for restored PVCs in Vault
        for pvc_obj in restore_pvc_objs:
            pv_obj = pvc_obj.backed_pv_obj
            vol_handle = pv_obj.get().get("spec").get("csi").get(
                "volumeHandle")
            restore_vol_handles.append(vol_handle)
            if kms_provider == constants.VAULT_KMS_PROVIDER:
                if kms.is_key_present_in_path(
                        key=vol_handle, path=self.kms.vault_backend_path):
                    log.info(
                        f"Vault: Found key for restore PVC {pvc_obj.name}")
                else:
                    raise ResourceNotFoundError(
                        f"Vault: Key not found for restored PVC {pvc_obj.name}"
                    )

        # Verify encrypted device is present and md5sum on all pods
        for vol_handle, pod_obj in zip(restore_vol_handles, restore_pod_objs):
            if pod_obj.exec_sh_cmd_on_pod(
                    command=f"lsblk | grep {vol_handle} | grep crypt"):
                log.info(f"Encrypted device found in {pod_obj.name}")
            else:
                raise ResourceNotFoundError(
                    f"Encrypted device not found in {pod_obj.name}")

            log.info(f"Verifying md5sum on pod {pod_obj.name}")
            verify_data_integrity(
                pod_obj=pod_obj,
                file_name=pod_obj.get_storage_path(storage_type="block"),
                original_md5sum=pod_obj.pvc.md5sum,
                block=True,
            )
            log.info(f"Verified md5sum on pod {pod_obj.name}")

        # Run IO on new pods
        log.info("Starting IO on new pods")
        for pod_obj in restore_pod_objs:
            pod_obj.run_io(storage_type="block", size="500M", runtime=15)

        # Wait for IO completion on new pods
        log.info("Waiting for IO completion on new pods")
        for pod_obj in restore_pod_objs:
            pod_obj.get_fio_results()
        log.info("IO completed on new pods.")

        # Delete the restored pods, PVC and snapshots
        log.info("Deleting pods using restored PVCs")
        for pod_obj in restore_pod_objs:
            pod_obj.delete()
            pod_obj.ocp.wait_for_delete(resource_name=pod_obj.name)

        log.info("Deleting restored PVCs")
        for pvc_obj in restore_pvc_objs:
            pv_obj = pvc_obj.backed_pv_obj
            pvc_obj.delete()
            pv_obj.ocp.wait_for_delete(resource_name=pv_obj.name)

        log.info("Deleting the snapshots")
        for snap_obj in snap_objs:
            snapcontent_obj = get_snapshot_content_obj(snap_obj=snap_obj)
            snap_obj.delete()
            snapcontent_obj.ocp.wait_for_delete(
                resource_name=snapcontent_obj.name)

        if kms_provider == constants.VAULT_KMS_PROVIDER:
            # Verify if keys for PVCs and snapshots are deleted from Vault
            if kv_version == "v1" or Version.coerce(
                    config.ENV_DATA["ocs_version"]) >= Version.coerce("4.9"):
                log.info(
                    "Verify whether the keys for PVCs and snapshots are deleted in vault"
                )
                for key in self.vol_handles + snap_handles + restore_vol_handles:
                    if not kms.is_key_present_in_path(
                            key=key, path=self.kms.vault_backend_path):
                        log.info(f"Vault: Key deleted for {key}")
                    else:
                        raise KMSResourceCleaneupError(
                            f"Vault: Key deletion failed for {key}")
                log.info("All keys from vault were deleted")
Example no. 13
0
    def test_encrypted_pvc_clone(
        self,
        kv_version,
        pv_encryption_kms_setup_factory,
        storageclass_factory,
        pgsql_factory_fixture,
        pvc_clone_factory,
        pgsql_teardown,
    ):
        """
        1. Create an encrypted storage class
        2. Deploy a PGSQL workload using the created storage class
        3. Create multiple clones of the same PVC while the PVC usage differs
        4. Attach a new pgsql pod to each clone
        5. Run a pgbench benchmark against the new pgsql pod
        6. Verify that an encryption key is created for each cloned PVC
        """
        pgsql_teardown
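        # The bare reference above only marks the pgsql_teardown fixture as
        # used; the cleanup itself runs from the fixture requested in the
        # test signature.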

        log.info("Setting up csi-kms-connection-details configmap")
        self.vault = pv_encryption_kms_setup_factory(kv_version)
        log.info("csi-kms-connection-details setup successful")

        # Create an encryption enabled storageclass for RBD
        self.sc_obj = storageclass_factory(
            interface=CEPHBLOCKPOOL,
            encrypted=True,
            encryption_kms_id=self.vault.kmsid,
        )

        # Create ceph-csi-kms-token in the tenant namespace
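        # (ceph-csi reads this per-namespace token secret to authenticate to
        # Vault when encrypting volumes provisioned from the storage class)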
        self.vault.vault_path_token = self.vault.generate_vault_token()
        self.vault.create_vault_csi_kms_token(namespace=BMO_NAME)

        # Deploy PGSQL workload
        log.info("Deploying pgsql workloads")
        pgsql = pgsql_factory_fixture(replicas=1, sc_name=self.sc_obj.name)

        # Get postgres pvcs obj list
        postgres_pvcs_obj = pgsql.get_postgres_pvc()

        # Get postgres pods obj list
        postgres_pods_obj = pgsql.get_postgres_pods()

        cloned_pvcs = self.create_cloned_pvc_and_verify_data(
            pgsql,
            postgres_pvcs_obj,
            postgres_pods_obj,
            pvc_clone_factory,
            self.sc_obj.name,
        )

        # Verify encryption keys are created for cloned PVCs in Vault
        for pvc_obj in cloned_pvcs:
            pv_obj = pvc_obj.backed_pv_obj
            vol_handle = pv_obj.get().get("spec").get("csi").get(
                "volumeHandle")
            if kms.is_key_present_in_path(key=vol_handle,
                                          path=self.vault.vault_backend_path):
                log.info(f"Vault: Found key for restore PVC {pvc_obj.name}")
            else:
                raise ResourceNotFoundError(
                    f"Vault: Key not found for restored PVC {pvc_obj.name}")
Example no. 14
0
    def test_encrypted_pvc_snapshot(
        self,
        kv_version,
        pv_encryption_kms_setup_factory,
        storageclass_factory,
        pgsql_factory_fixture,
        snapshot_factory,
        snapshot_restore_factory,
        pgsql_teardown,
    ):
        """
        1. Create an encrypted storage class
        2. Deploy a PGSQL workload using the created storage class
        3. Take a snapshot of the pgsql PVC
        4. Restore the snapshot into a new PVC
        5. Attach a new pgsql pod to the restored PVC
        6. Run a pgbench benchmark against the new pgsql pod
        7. Verify that the encryption keys are created

        """
        pgsql_teardown

        log.info("Setting up csi-kms-connection-details configmap")
        self.vault = pv_encryption_kms_setup_factory(kv_version)
        log.info("csi-kms-connection-details setup successful")

        # Create an encryption enabled storageclass for RBD
        self.sc_obj = storageclass_factory(
            interface=CEPHBLOCKPOOL,
            encrypted=True,
            encryption_kms_id=self.vault.kmsid,
        )

        # Create ceph-csi-kms-token in the tenant namespace
        self.vault.vault_path_token = self.vault.generate_vault_token()
        self.vault.create_vault_csi_kms_token(namespace=BMO_NAME)

        # Deploy PGSQL workload
        log.info("Deploying pgsql workloads")
        pgsql = pgsql_factory_fixture(replicas=1, sc_name=self.sc_obj.name)

        # Get postgres pvc list obj
        postgres_pvcs_obj = pgsql.get_postgres_pvc()

        # Take a snapshot of it
        snapshots, restore_pvc_objs = self.create_snapshot(
            pgsql,
            postgres_pvcs_obj,
            snapshot_factory,
            snapshot_restore_factory,
            self.sc_obj.name,
        )

        # Verify encryption keys are created for snapshots in Vault
        for snap_obj in snapshots:
            snapshot_content = get_snapshot_content_obj(snap_obj=snap_obj)
            snap_handle = snapshot_content.get().get("status").get(
                "snapshotHandle")
            if kms.is_key_present_in_path(key=snap_handle,
                                          path=self.vault.vault_backend_path):
                log.info(f"Vault: Found key for snapshot {snap_obj.name}")
            else:
                raise ResourceNotFoundError(
                    f"Vault: Key not found for snapshot {snap_obj.name}")

        # Verify encryption keys are created for restored PVCs in Vault
        for pvc_obj in restore_pvc_objs:
            pv_obj = pvc_obj.backed_pv_obj
            vol_handle = pv_obj.get().get("spec").get("csi").get(
                "volumeHandle")
            if kms.is_key_present_in_path(key=vol_handle,
                                          path=self.vault.vault_backend_path):
                log.info(f"Vault: Found key for restore PVC {pvc_obj.name}")
            else:
                raise ResourceNotFoundError(
                    f"Vault: Key not found for restored PVC {pvc_obj.name}")