def create_pvc_and_verify_pvc_exists(sc_name,
                                     cbp_name,
                                     desired_status=constants.STATUS_BOUND,
                                     wait=True):
    """
    Create pvc, verify pvc is bound in state and
    pvc exists on ceph side
    """

    pvc_data = defaults.CSI_PVC_DICT.copy()
    pvc_data['metadata']['name'] = helpers.create_unique_resource_name(
        'test', 'pvc')
    pvc_data['spec']['storageClassName'] = sc_name
    pvc_data['spec']['resources']['requests']['storage'] = "10Gi"
    pvc_obj = pvc.PVC(**pvc_data)
    pvc_obj.create()
    if wait:
        assert pvc_obj.ocp.wait_for_resource(
            condition=desired_status, resource_name=pvc_obj.name
        ), (
            f"{pvc_obj.kind} {pvc_obj.name} failed to reach "
            f"status {desired_status}"
        )
    pvc_obj.reload()

    # Validate that the backing rbd image was created on the Ceph side
    logger.info(f"Verifying that pv {pvc_obj.backed_pv} exists on backend")
    ct_pod = pod.get_ceph_tools_pod()
    pv_list = ct_pod.exec_ceph_cmd(ceph_cmd=f"rbd ls -p {cbp_name}",
                                   format='json')
    _rc = pvc_obj.backed_pv in pv_list
    assert _rc, f"pv {pvc_obj.backed_pv} doesn't exist on backend"
    logger.info(f"pv {pvc_obj.backed_pv} exists on backend")
    return pvc_obj
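
A minimal usage sketch for the helper above; the storage class and block pool names below are placeholders that would normally come from the fixtures that created those resources.

# Placeholder resource names; a real test would pass the names of the
# StorageClass and CephBlockPool it created earlier.
pvc_obj = create_pvc_and_verify_pvc_exists(
    sc_name='my-storageclass',
    cbp_name='my-cephblockpool',
)
logger.info(f"PVC {pvc_obj.name} is bound to pv {pvc_obj.backed_pv}")
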
def verify_pv_not_exists(pv_name, cbp_name):
    """
    Ensure that pv does not exists
    """

    # Validate on ceph side
    logger.info(f"Verifying pv {pv_name} exists on backend")
    ct_pod = pod.get_ceph_tools_pod()
    pvc_list = ct_pod.exec_ceph_cmd(ceph_cmd=f"rbd ls -p {cbp_name}",
                                    format='json')
    _rc = pv_name in pvc_list

    if _rc:
        raise UnexpectedBehaviour(f"pv {pv_name} exists on backend")
    logger.info(
        f"Expected: pv {pv_name} doesn't exist on backend after deleting pvc")

    # Validate on oc side
    try:
        PV.get(pv_name)
    except CommandFailed as ecf:
        assert "not found" in str(ecf), (
            f"Unexpected: pv {pv_name} still exists")
    logger.info(f"Expected: pv should not be found "
                f"after deleting corresponding pvc")
Example #3
def test_run():
    tools_pod = pod.get_ceph_tools_pod()
    tools_pod.add_role(role='client')

    return radosbench.run(ceph_pods=[tools_pod],
                          config={
                              'time': 10,
                              'cleanup': False
                          })
Example #4
def get_cephfs_data_pool_name():
    """
    Fetches the CephFS data pool name from Ceph

    Returns:
        str: The CephFS data pool name
    """
    ct_pod = pod.get_ceph_tools_pod()
    out = ct_pod.exec_ceph_cmd('ceph fs ls')
    return out[0]['data_pools'][0]
Example #5
def test_main():
    tools_pod = pod.get_ceph_tools_pod()
    cmd = "ceph osd df"

    # exec_ceph_cmd returns the parsed output of the command, as in the
    # other examples here, rather than an (out, err, ret) tuple
    out = tools_pod.exec_ceph_cmd(ceph_cmd=cmd)
    if out:
        print(out)
Example #6
def get_admin_key():
    """
    Fetches the admin key from Ceph

    Returns:
        str: The base64-encoded admin key
    """
    ct_pod = pod.get_ceph_tools_pod()
    out = ct_pod.exec_ceph_cmd('ceph auth get-key client.admin')
    base64_output = base64.b64encode(out['key'].encode()).decode()
    return base64_output
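
The base64-encoded key returned above is the form a Kubernetes Secret expects in its data field; a minimal sketch, with the secret name chosen purely for illustration.

admin_key = get_admin_key()
secret_data = {
    'apiVersion': 'v1',
    'kind': 'Secret',
    'metadata': {
        'name': 'ceph-admin-secret',   # illustrative name
        'namespace': defaults.ROOK_CLUSTER_NAMESPACE,
    },
    'type': 'Opaque',
    # get_admin_key() already returns a base64-encoded string, which is
    # exactly what Secret 'data' values must be
    'data': {'key': admin_key},
}
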
def check_ceph_used_space():
    """
    Check for the used space in cluster
    """
    ct_pod = pod.get_ceph_tools_pod()
    ceph_status = ct_pod.exec_ceph_cmd(ceph_cmd="ceph status")
    assert ceph_status is not None
    used = ceph_status['pgmap']['bytes_used']
    used_in_gb = used / constants.GB
    global used_space
    if used_space and used_space == used_in_gb:
        return used_in_gb
    used_space = used_in_gb
    raise UnexpectedBehaviour(f"In Ceph status, used size is keeping varying")
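
Because the helper raises until two consecutive samples agree, it is typically wrapped in a retry loop; a minimal sketch, with an arbitrary retry budget and sampling interval.

import time

used_gb = None
for _ in range(10):                     # arbitrary retry budget
    try:
        used_gb = check_ceph_used_space()
        break
    except UnexpectedBehaviour:
        time.sleep(10)                  # arbitrary sampling interval
assert used_gb is not None, "Ceph used space did not stabilise"
logger.info(f"Ceph used space: {used_gb} GB")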
Example #8
def verify_block_pool_exists(pool_name):
    """
    Verify whether a Ceph block pool exists

    Args:
        pool_name (str): The name of the Ceph block pool

    Returns:
        bool: True if the Ceph block pool exists, False otherwise
    """
    logger.info(f"Verifying that block pool {pool_name} exists")
    ct_pod = pod.get_ceph_tools_pod()
    pools = ct_pod.exec_ceph_cmd('ceph osd lspools')
    for pool in pools:
        if pool_name in pool.get('poolname'):
            return True
    return False
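
A small usage sketch; the standalone pool name is a placeholder, and the second assert simply reuses get_cephfs_data_pool_name from above, since the CephFS data pool is also listed by `ceph osd lspools`.

assert verify_block_pool_exists('my-cephblockpool'), (  # placeholder name
    "Expected the Ceph block pool to exist"
)
assert verify_block_pool_exists(get_cephfs_data_pool_name())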
Example #9
def validate_cephfilesystem(fs_name):
    """
     Verify CephFileSystem exists at ceph and k8s

     Args:
        fs_name (str): The name of the Ceph FileSystem

     Returns:
         bool: True if CephFileSystem is created at ceph and k8s side else
            will return False with valid msg i.e Failure cause
    """
    CFS = ocp.OCP(
        kind=constants.CEPHFILESYSTEM,
        namespace=defaults.ROOK_CLUSTER_NAMESPACE
    )
    ct_pod = pod.get_ceph_tools_pod()
    ceph_validate = False
    k8s_validate = False
    cmd = "ceph fs ls"
    logger.info(f"Checking for CephFileSystem {fs_name}")
    out = ct_pod.exec_ceph_cmd(ceph_cmd=cmd)
    if out:
        out = out[0]['name']
        logger.info(out)
        if out == fs_name:
            logger.info("FileSystem got created from Ceph Side")
            ceph_validate = True
        else:
            logger.error("FileSystem was not present at Ceph Side")
            return False
    result = CFS.get(resource_name=fs_name)
    if result['metadata']['name']:
        logger.info(f"Filesystem got created from kubernetes Side")
        k8s_validate = True
    else:
        logger.error("Filesystem was not create at Kubernetes Side")
        return False
    return ceph_validate and k8s_validate
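
Typical usage in a test; the filesystem name is a placeholder for whatever name the CephFilesystem resource was created with.

fs_name = 'my-cephfilesystem'   # placeholder name
assert validate_cephfilesystem(fs_name), (
    f"CephFileSystem {fs_name} was not found on both the Ceph and "
    f"Kubernetes sides"
)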