Example #1
def create_cephfilesystem():
    """
    Function for deploying CephFileSystem (MDS)

    Returns:
        bool: True if the CephFileSystem is created successfully
    """
    fs_data = defaults.CEPHFILESYSTEM_DICT.copy()
    fs_data['metadata']['name'] = create_unique_resource_name(
        'test', 'cephfs'
    )
    fs_data['metadata']['namespace'] = config.ENV_DATA['cluster_namespace']
    global CEPHFS_OBJ
    CEPHFS_OBJ = OCS(**fs_data)
    CEPHFS_OBJ.create()
    pods = pod.get_all_pods(
        namespace=defaults.ROOK_CLUSTER_NAMESPACE
    )
    for ceph_pod in pods:
        if 'rook-ceph-mds' in ceph_pod.labels.values():
            assert ceph_pod.ocp.wait_for_resource(
                condition=constants.STATUS_RUNNING,
                selector='app=rook-ceph-mds'
            )
    assert validate_cephfilesystem(fs_name=fs_data['metadata']['name'])
    return True
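For reference, a minimal usage sketch for this helper (the test name is hypothetical; teardown relies on the module-level CEPHFS_OBJ handle the function populates):

def test_cephfs_deployment():
    # Hypothetical caller of the helper above
    assert create_cephfilesystem()
    # Teardown: remove the CephFileSystem via the module-level handle
    CEPHFS_OBJ.delete()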
Example #2
def create_resource(desired_status=constants.STATUS_AVAILABLE,
                    wait=True,
                    **kwargs):
    """
    Create a resource

    Args:
        desired_status (str): The status of the resource to wait for
        wait (bool): True for waiting for the resource to reach the desired
            status, False otherwise
        kwargs (dict): Dictionary of the OCS resource

    Returns:
        OCS: An OCS instance

    Raises:
        AssertionError: In case of any failure
    """
    ocs_obj = OCS(**kwargs)
    resource_name = kwargs.get('metadata').get('name')
    created_resource = ocs_obj.create()
    assert created_resource, f"Failed to create resource {resource_name}"
    if wait:
        assert ocs_obj.ocp.wait_for_resource(
            condition=desired_status, resource_name=resource_name
        ), (
            f"{ocs_obj.kind} {resource_name} failed to reach "
            f"status {desired_status}"
        )
    return ocs_obj
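A minimal usage sketch, reusing template names that appear elsewhere in these examples; wait=False is chosen here because a StorageClass exposes no status phase to wait on:

sc_data = defaults.CSI_RBD_STORAGECLASS_DICT.copy()
sc_data['metadata']['name'] = create_unique_resource_name('test', 'csi-rbd')
# No status phase on a StorageClass, so skip the wait
sc_obj = create_resource(wait=False, **sc_data)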
Example #3
def invalid_storageclass(request):
    """
    Creates a CephFS or RBD StorageClass with invalid parameters.

    Storageclass is removed at the end of test.

    Returns:
        str: Name of created StorageClass
    """
    logger.info(f"SETUP - creating storageclass "
                f"{request.param['values']['storageclass_name']}")
    yaml_path = os.path.join(request.param['template_dir'],
                             "storageclass.yaml")
    with open(yaml_path, 'r') as yaml_file:
        yaml_data = yaml.safe_load(yaml_file)
    yaml_data.update(request.param['values'])
    storageclass = OCS(**yaml_data)
    sc_data = storageclass.create()

    logger.debug('Check that storageclass has assigned creationTimestamp')
    assert sc_data['metadata']['creationTimestamp']

    yield sc_data

    logger.info(f"TEARDOWN - removing storageclass "
                f"{request.param['values']['storageclass_name']}")
    storageclass.delete()
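A usage sketch for this fixture, assuming it is wired up through pytest's indirect parametrization (the template_dir path and name values are illustrative):

@pytest.mark.parametrize('invalid_storageclass', [
    {
        'template_dir': 'templates/ocs-deployment',  # illustrative path
        'values': {'storageclass_name': 'invalid-sc'},
    },
], indirect=True)
def test_invalid_storageclass(invalid_storageclass):
    # The fixture yields the dict returned by storageclass.create()
    assert invalid_storageclass['metadata']['creationTimestamp']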
Example #4
def create_storageclass(sc_name, expect_fail=False):
    """
    Function to create a storage class and check for
    duplicate storage class name

    Args:
        sc_name (str): name of the storageclass to be created
        expect_fail (bool): To catch the incorrect scenario if
            two SCs are indeed created with same name

    Returns:
        None

    """

    # Create a storage class
    namespace = config.ENV_DATA["cluster_namespace"]
    mons = (
        f'rook-ceph-mon-a.{namespace}.svc.cluster.local:6789,'
        f'rook-ceph-mon-b.{namespace}.svc.cluster.local:6789,'
        f'rook-ceph-mon-c.{namespace}.svc.cluster.local:6789'
    )
    log.info("Creating a Storage Class")
    sc_data = defaults.CSI_RBD_STORAGECLASS_DICT.copy()
    sc_data['metadata']['name'] = sc_name
    sc_data['parameters']['monitors'] = mons

    global SC_OBJ
    SC_OBJ = OCS(**sc_data)

    # Check for expected failure with duplicate SC name
    try:
        SC_OBJ.create()
        assert not expect_fail, (
            "SC creation with a duplicate name succeeded, but was "
            "expected to fail!"
        )
        log.info(
            f"Storage class: {SC_OBJ.name} created successfully !!"
        )
        log.debug(sc_data)

    except CommandFailed as ecf:
        assert "AlreadyExists" in str(ecf)
        log.error(
            f"Cannot create two StorageClasses with the same name!\n"
            f"{ecf}"
        )
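A usage sketch exercising the duplicate-name branch this function guards against (the name is illustrative):

sc_name = create_unique_resource_name('test', 'csi-rbd')
create_storageclass(sc_name)
# Second creation with the same name should hit the AlreadyExists branch
create_storageclass(sc_name, expect_fail=True)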
Example #5
def setup(self):
    """
    Setting up the environment for the test
    """
    # Create a storage class
    log.info("Creating a Storage Class")
    self.sc_data = defaults.CSI_RBD_STORAGECLASS_DICT.copy()
    self.sc_data['metadata']['name'] = helpers.create_unique_resource_name(
        'test', 'csi-rbd')
    global SC_OBJ
    SC_OBJ = OCS(**self.sc_data)
    assert SC_OBJ.create()
    log.info(f"Storage class: {SC_OBJ.name} created successfully")
    log.debug(self.sc_data)
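The matching teardown is not shown in this example; a minimal sketch, assuming SC_OBJ still holds the handle assigned above:

def teardown(self):
    """
    Remove the StorageClass created in setup
    """
    SC_OBJ.delete()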
Example #6
def setup_fs(self):
    """
    Setting up the environment for the test
    """
    global CEPH_OBJ
    self.fs_data = defaults.CEPHFILESYSTEM_DICT.copy()
    self.fs_data['metadata']['name'] = helpers.create_unique_resource_name(
        'test', 'cephfs')
    self.fs_data['metadata']['namespace'] = ENV_DATA['cluster_namespace']
    CEPH_OBJ = OCS(**self.fs_data)
    CEPH_OBJ.create()
    assert POD.wait_for_resource(condition='Running',
                                 selector='app=rook-ceph-mds')
    # A CephFileSystem deploys two MDS pods (active + standby)
    pods = POD.get(selector='app=rook-ceph-mds')['items']
    assert len(pods) == 2
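Since wait_for_resource() may return as soon as a single matching pod is Running, a tighter variant (using the resource_count parameter seen in Example #9) would wait for both MDS pods explicitly:

assert POD.wait_for_resource(
    condition='Running', selector='app=rook-ceph-mds',
    resource_count=2, timeout=600
)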
Example #7
 def test_ocs_346(self):
     """
     Testing basics: secret creation,
     storage class creation and PVC with RBD
     """
     self.rbd_secret = defaults.CSI_RBD_SECRET.copy()
     del self.rbd_secret['data']['kubernetes']
     self.rbd_secret['data']['admin'] = get_admin_key_from_ceph_tools()
     logging.info(self.rbd_secret)
     secret = OCS(**self.rbd_secret)
     secret.create()
     self.rbd_sc = defaults.CSI_RBD_STORAGECLASS_DICT.copy()
     self.rbd_sc['parameters']['monitors'] = self.mons
     del self.rbd_sc['parameters']['userid']
     storage_class = OCS(**self.rbd_sc)
     storage_class.create()
     self.rbd_pvc = defaults.CSI_RBD_PVC.copy()
     pvc = PVC(**self.rbd_pvc)
     pvc.create()
     assert 'Bound' in pvc.status
     pvc.delete()
     storage_class.delete()
     secret.delete()
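PVC binding is asynchronous, so asserting 'Bound' immediately after create() can be flaky. A minimal polling sketch; it assumes pvc.status re-reads the resource on each access (if the value is cached, the PVC object would need to be re-fetched instead):

import time

def wait_for_pvc_bound(pvc, timeout=60, interval=3):
    # Poll until the PVC reports 'Bound' or the timeout expires
    deadline = time.time() + timeout
    while time.time() < deadline:
        if 'Bound' in pvc.status:
            return True
        time.sleep(interval)
    return False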
Example #8
 def test_ocs_336(self, test_fixture):
     """
     Testing basics: secret creation,
     storage class creation and PVC with CephFS
     """
     self.cephfs_secret = defaults.CSI_CEPHFS_SECRET.copy()
     del self.cephfs_secret['data']['userID']
     del self.cephfs_secret['data']['userKey']
     self.cephfs_secret['data']['adminKey'] = (
         get_admin_key_from_ceph_tools())
     self.cephfs_secret['data']['adminID'] = constants.ADMIN_BASE64
     logging.info(self.cephfs_secret)
     secret = OCS(**self.cephfs_secret)
     secret.create()
     self.cephfs_sc = defaults.CSI_CEPHFS_STORAGECLASS_DICT.copy()
     self.cephfs_sc['parameters']['monitors'] = self.mons
     self.cephfs_sc['parameters']['pool'] = (
         f"{self.fs_data['metadata']['name']}-data0")
     storage_class = OCS(**self.cephfs_sc)
     storage_class.create()
     self.cephfs_pvc = defaults.CSI_CEPHFS_PVC.copy()
     pvc = PVC(**self.cephfs_pvc)
     pvc.create()
     log.info(pvc.status)
     assert 'Bound' in pvc.status
     pvc.delete()
     storage_class.delete()
     secret.delete()
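One caveat shared by this test and Example #7: if the Bound assertion fails, the trailing delete() calls never run and the resources leak. A sketch of a cleanup-safe shape for the same flow:

secret = OCS(**self.cephfs_secret)
secret.create()
try:
    storage_class = OCS(**self.cephfs_sc)
    storage_class.create()
    try:
        pvc = PVC(**self.cephfs_pvc)
        pvc.create()
        try:
            assert 'Bound' in pvc.status
        finally:
            pvc.delete()
    finally:
        storage_class.delete()
finally:
    secret.delete()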
Example #9
    def test_deployment(self):
        log.info("Running OCS basic installation")
        cluster_path = config.ENV_DATA['cluster_path']
        # Test cluster access; skip the deployment if the cluster exists.
        if config.RUN['cli_params'].get('cluster_path') and OCP.set_kubeconfig(
            os.path.join(cluster_path, config.RUN.get('kubeconfig_location'))
        ):
            pytest.skip(
                "Installation skipped because the cluster is already running"
            )

        # Generate install-config from template
        log.info("Generating install-config")
        run_cmd(f"mkdir -p {cluster_path}")
        pull_secret_path = os.path.join(
            TOP_DIR,
            "data",
            "pull-secret"
        )

        # TODO: check for supported platforms and raise an exception if the
        # platform is not supported. Currently only AWS is supported.

        _templating = templating.Templating()
        install_config_str = _templating.render_template(
            "install-config.yaml.j2", config.ENV_DATA
        )
        # Parse the rendered YAML so that we can manipulate the object directly
        install_config_obj = yaml.safe_load(install_config_str)
        with open(pull_secret_path, "r") as f:
            # Parse, then unparse, the JSON file.
            # We do this for two reasons: to ensure it is well-formatted, and
            # also to ensure it ends up as a single line.
            install_config_obj['pullSecret'] = json.dumps(json.loads(f.read()))
        install_config_str = yaml.safe_dump(install_config_obj)
        log.info(f"Install config: \n{install_config_str}")
        install_config = os.path.join(cluster_path, "install-config.yaml")
        with open(install_config, "w") as f:
            f.write(install_config_str)

        # Download installer
        installer = get_openshift_installer(
            config.DEPLOYMENT['installer_version']
        )
        # Download client
        get_openshift_client()

        # Deploy cluster
        log.info("Deploying cluster")
        run_cmd(
            f"{installer} create cluster "
            f"--dir {cluster_path} "
            f"--log-level debug"
        )

        # Test cluster access
        if not OCP.set_kubeconfig(
            os.path.join(cluster_path, config.RUN.get('kubeconfig_location'))
        ):
            pytest.fail("Cluster is not available!")

        # TODO: Create cluster object, add to config.ENV_DATA for other tests to
        # utilize.
        # Determine worker pattern and create ebs volumes
        with open(os.path.join(cluster_path, "terraform.tfvars")) as f:
            tfvars = json.load(f)

        cluster_id = tfvars['cluster_id']
        worker_pattern = f'{cluster_id}-worker*'
        log.info(f'Worker pattern: {worker_pattern}')
        create_ebs_volumes(worker_pattern, region_name=config.ENV_DATA['region'])

        # render templates and create resources
        create_oc_resource('common.yaml', cluster_path, _templating, config.ENV_DATA)
        run_cmd(
            f'oc label namespace {config.ENV_DATA["cluster_namespace"]} '
            f'"openshift.io/cluster-monitoring=true"'
        )
        run_cmd(
            f"oc policy add-role-to-user view "
            f"system:serviceaccount:openshift-monitoring:prometheus-k8s "
            f"-n {config.ENV_DATA['cluster_namespace']}"
        )
        apply_oc_resource(
            'csi-nodeplugin-rbac_rbd.yaml',
            cluster_path,
            _templating,
            config.ENV_DATA,
            template_dir="ocs-deployment/csi/rbd/"
        )
        apply_oc_resource(
            'csi-provisioner-rbac_rbd.yaml',
            cluster_path,
            _templating,
            config.ENV_DATA,
            template_dir="ocs-deployment/csi/rbd/"
        )
        apply_oc_resource(
            'csi-nodeplugin-rbac_cephfs.yaml',
            cluster_path,
            _templating,
            config.ENV_DATA,
            template_dir="ocs-deployment/csi/cephfs/"
        )
        apply_oc_resource(
            'csi-provisioner-rbac_cephfs.yaml',
            cluster_path,
            _templating,
            config.ENV_DATA,
            template_dir="ocs-deployment/csi/cephfs/"
        )
        # Increased to 15 seconds as 10 is not enough
        # TODO: use a sampler function to check whether the resource exists
        wait_time = 15
        log.info(f"Waiting {wait_time} seconds...")
        time.sleep(wait_time)
        create_oc_resource(
            'operator-openshift-with-csi.yaml', cluster_path, _templating, config.ENV_DATA
        )
        log.info(f"Waiting {wait_time} seconds...")
        time.sleep(wait_time)
        run_cmd(
            f"oc wait --for condition=ready pod "
            f"-l app=rook-ceph-operator "
            f"-n {config.ENV_DATA['cluster_namespace']} "
            f"--timeout=120s"
        )
        run_cmd(
            f"oc wait --for condition=ready pod "
            f"-l app=rook-discover "
            f"-n {config.ENV_DATA['cluster_namespace']} "
            f"--timeout=120s"
        )
        create_oc_resource('cluster.yaml', cluster_path, _templating, config.ENV_DATA)

        # Check for the Running status of Ceph Pods
        run_cmd(
            f"oc wait --for condition=ready pod "
            f"-l app=rook-ceph-agent "
            f"-n {config.ENV_DATA['cluster_namespace']} "
            f"--timeout=120s"
        )
        assert POD.wait_for_resource(
            condition='Running', selector='app=rook-ceph-mon',
            resource_count=3, timeout=600
        )
        assert POD.wait_for_resource(
            condition='Running', selector='app=rook-ceph-mgr',
            timeout=600
        )
        assert POD.wait_for_resource(
            condition='Running', selector='app=rook-ceph-osd',
            resource_count=3, timeout=600
        )

        create_oc_resource('toolbox.yaml', cluster_path, _templating, config.ENV_DATA)
        log.info(f"Waiting {wait_time} seconds...")
        time.sleep(wait_time)
        create_oc_resource(
            'storage-manifest.yaml', cluster_path, _templating, config.ENV_DATA
        )
        create_oc_resource(
            "service-monitor.yaml", cluster_path, _templating, config.ENV_DATA
        )
        create_oc_resource(
            "prometheus-rules.yaml", cluster_path, _templating, config.ENV_DATA
        )
        log.info(f"Waiting {wait_time} seconds...")
        time.sleep(wait_time)

        # Create MDS pods for CephFileSystem
        self.fs_data = copy.deepcopy(defaults.CEPHFILESYSTEM_DICT)
        self.fs_data['metadata']['namespace'] = config.ENV_DATA['cluster_namespace']

        global CEPH_OBJ
        CEPH_OBJ = OCS(**self.fs_data)
        CEPH_OBJ.create()
        assert POD.wait_for_resource(
            condition=constants.STATUS_RUNNING, selector='app=rook-ceph-mds',
            resource_count=2, timeout=600
        )

        # Check for CephFilesystem creation in ocp
        cfs_data = CFS.get()
        cfs_name = cfs_data['items'][0]['metadata']['name']

        if helpers.validate_cephfilesystem(cfs_name):
            log.info("MDS deployment is successful!")
        else:
            log.error("MDS deployment failed! Please check the logs!")

        # Verify health of ceph cluster
        # TODO: move destroy cluster logic to new CLI usage pattern?
        log.info("Done creating rook resources, waiting for HEALTH_OK")
        assert ceph_health_check(namespace=config.ENV_DATA['cluster_namespace'])
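The fixed time.sleep(wait_time) pauses above are what the sampler TODO refers to; a minimal polling sketch (the helper name is hypothetical, and it assumes run_cmd raises CommandFailed while the queried resource does not exist yet):

import time

def wait_for_resource_to_exist(check_cmd, timeout=60, interval=5):
    """
    Poll until check_cmd (e.g. an 'oc get ...' invocation) succeeds
    or the timeout expires.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            run_cmd(check_cmd)
            return True
        except CommandFailed:
            time.sleep(interval)
    return False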