def test_basics_cephfs(self):
     """
     Testing basics: secret creation, storage class creation and
     PVC create/bind/delete using the CSI RBD YAML templates.

     NOTE(review): despite the ``cephfs`` suffix in the name, every
     template used here is RBD-based -- confirm the intended name.
     """
     # Build the RBD secret from the template: drop the unused
     # 'kubernetes' key and inject the live admin key from the tools pod.
     self.rbd_secret = templating.load_yaml_to_dict(
         constants.CSI_RBD_SECRET_YAML)
     del self.rbd_secret['data']['kubernetes']
     self.rbd_secret['data']['admin'] = get_admin_key_from_ceph_tools()
     logging.info(self.rbd_secret)
     secret = OCS(**self.rbd_secret)
     secret.create()
     # Storage class: point at this cluster's monitors; the template's
     # 'userid' parameter is removed so the admin secret is used.
     self.rbd_sc = templating.load_yaml_to_dict(
         constants.CSI_RBD_STORAGECLASS_YAML)
     self.rbd_sc['parameters']['monitors'] = self.mons
     del self.rbd_sc['parameters']['userid']
     storage_class = OCS(**self.rbd_sc)
     storage_class.create()
     # Create a PVC from the template and require it to reach Bound.
     self.rbd_pvc = templating.load_yaml_to_dict(constants.CSI_RBD_PVC_YAML)
     pvc = PVC(**self.rbd_pvc)
     pvc.create()
     assert 'Bound' in pvc.status
     # Teardown in reverse creation order.
     pvc.delete()
     storage_class.delete()
     secret.delete()
Пример #2
0
def default_storageclasses(request, teardown_factory_session):
    """
    Returns dictionary with storageclasses. Keys represent reclaim policy of
    storageclass. There are two storageclasses for each key. First is RBD based
    and the second one is CephFS based. Storageclasses with Retain Reclaim
    Policy are created from default storageclasses.
    """
    from copy import deepcopy

    scs = {
        constants.RECLAIM_POLICY_DELETE: [],
        constants.RECLAIM_POLICY_RETAIN: []
    }

    # TODO(fbalak): Use proper constants after
    # https://github.com/red-hat-storage/ocs-ci/issues/1056
    # is resolved
    for sc_name in ('ocs-storagecluster-ceph-rbd',
                    'ocs-storagecluster-cephfs'):
        sc = OCS(kind=constants.STORAGECLASS, metadata={'name': sc_name})
        sc.reload()
        scs[constants.RECLAIM_POLICY_DELETE].append(sc)
        # BUGFIX: derive the Retain variant from a deep copy. The original
        # mutated ``sc`` in place *after* appending it to the DELETE bucket,
        # so both buckets ended up referencing the '-retain' object.
        # (TODO confirm OCS instances deep-copy cleanly.)
        retain_sc = deepcopy(sc)
        retain_sc.data['reclaimPolicy'] = constants.RECLAIM_POLICY_RETAIN
        retain_sc.data['metadata']['name'] += '-retain'
        retain_sc._name = retain_sc.data['metadata']['name']
        retain_sc.create()
        teardown_factory_session(retain_sc)
        scs[constants.RECLAIM_POLICY_RETAIN].append(retain_sc)
    return scs
def create_storageclass(sc_name, expect_fail=False):
    """
    Create an RBD storage class and verify duplicate-name behaviour.

    Args:
        sc_name (str): name of the storageclass to be created
        expect_fail (bool): To catch the incorrect scenario if
            two SCs are indeed created with same name

    Returns:
        None

    """
    # Render the RBD storage class template with the requested name
    sc_data = templating.load_yaml(constants.CSI_RBD_STORAGECLASS_YAML)
    sc_data["metadata"]["name"] = sc_name
    sc_data["parameters"]["clusterID"] = defaults.ROOK_CLUSTER_NAMESPACE

    global SC_OBJ
    SC_OBJ = OCS(**sc_data)

    try:
        SC_OBJ.create()
    except CommandFailed as ecf:
        # A duplicate name is rejected server-side with AlreadyExists
        assert "AlreadyExists" in str(ecf)
        log.info(f"Cannot create two StorageClasses with same name !"
                 f" Error message:  \n"
                 f"{ecf}")
    else:
        # Creation succeeded -- that is only correct when no duplicate
        # was expected
        assert not expect_fail, "SC creation with same name passed. Expected to fail !"
        log.info(f"Storage class: {SC_OBJ.name} created successfully !")
        log.debug(sc_data)
Пример #4
0
def setup_ceph_toolbox():
    """
    Deploy the ceph toolbox pod, acting as a no-op when a toolbox is
    already present in the cluster namespace.
    """
    namespace = ocsci_config.ENV_DATA['cluster_namespace']
    existing_toolbox = get_pod_name_by_pattern('rook-ceph-tools', namespace)
    if len(existing_toolbox) == 1:
        log.info("Ceph toolbox already exists, skipping")
        return
    if ocsci_config.ENV_DATA.get("ocs_version") != '4.2':
        # for OCS >= 4.3 there is new toolbox pod deployment done here:
        # https://github.com/openshift/ocs-operator/pull/207/
        log.info("starting ceph toolbox pod")
        run_cmd(
            'oc patch ocsinitialization ocsinit -n openshift-storage --type '
            'json --patch  \'[{ "op": "replace", "path": '
            '"/spec/enableCephTools", "value": true }]\'')
        return
    # OCS 4.2: spawn the toolbox manually, reusing the image of the
    # running rook-ceph-operator pod.
    operator_pods = get_pod_name_by_pattern('rook-ceph-operator', namespace)
    operator_dump = run_cmd(
        f'oc -n {namespace} get pods {operator_pods[0]} -o yaml')
    operator_pod = yaml.safe_load(operator_dump)
    operator_image = operator_pod['spec']['containers'][0]['image']
    toolbox_spec = templating.load_yaml(constants.TOOL_POD_YAML)
    toolbox_spec['spec']['template']['spec']['containers'][0][
        'image'] = operator_image
    OCS(**toolbox_spec).create()
 def test_basics_rbd(self, test_fixture):
     """
     Testing basics: secret creation, storage class creation and
     PVC create/bind/delete using the CSI CephFS YAML templates.

     NOTE(review): despite the ``rbd`` suffix in the name, every
     template used here is CephFS-based -- confirm the intended name.
     """
     # Build the CephFS secret: drop the user credentials and install
     # the admin id/key (admin key fetched live from the tools pod).
     self.cephfs_secret = templating.load_yaml_to_dict(
         constants.CSI_CEPHFS_SECRET_YAML)
     del self.cephfs_secret['data']['userID']
     del self.cephfs_secret['data']['userKey']
     self.cephfs_secret['data']['adminKey'] = (
         get_admin_key_from_ceph_tools())
     self.cephfs_secret['data']['adminID'] = constants.ADMIN_BASE64
     logging.info(self.cephfs_secret)
     secret = OCS(**self.cephfs_secret)
     secret.create()
     # Storage class: point at this cluster's monitors and at the data
     # pool of the filesystem created by the fixture (``self.fs_data``).
     self.cephfs_sc = templating.load_yaml_to_dict(
         constants.CSI_CEPHFS_STORAGECLASS_YAML)
     self.cephfs_sc['parameters']['monitors'] = self.mons
     self.cephfs_sc['parameters']['pool'] = (
         f"{self.fs_data['metadata']['name']}-data0")
     storage_class = OCS(**self.cephfs_sc)
     storage_class.create()
     # Create a PVC from the template and require it to reach Bound.
     self.cephfs_pvc = templating.load_yaml_to_dict(
         constants.CSI_CEPHFS_PVC_YAML)
     pvc = PVC(**self.cephfs_pvc)
     pvc.create()
     log.info(pvc.status)
     assert 'Bound' in pvc.status
     # Teardown in reverse creation order.
     pvc.delete()
     storage_class.delete()
     secret.delete()
Пример #6
0
    def create_ocs_jenkins_template(self):
        """
        Create the OCS flavour of the Jenkins template.

        Clones the stock 'jenkins-persistent' template, points its PVC at
        the ocs-storagecluster-ceph-rbd storage class, enlarges the volume
        and adds a JAVA_OPTS parameter that speeds up slave spawning.
        """
        log.info("Create Jenkins Template, jenkins-persistent-ocs")
        ocp_obj = OCP(namespace="openshift", kind="template")
        tmp_dict = ocp_obj.get(
            resource_name="jenkins-persistent", out_yaml_format=True
        )
        tmp_dict["labels"]["app"] = "jenkins-persistent-ocs"
        tmp_dict["labels"]["template"] = "jenkins-persistent-ocs-template"
        tmp_dict["metadata"]["name"] = "jenkins-persistent-ocs"
        # The PVC position in 'objects' differs between OCP 4.5 and 4.6,
        # so match on kind rather than a fixed index.
        for template_obj in tmp_dict["objects"]:
            if template_obj["kind"] == constants.PVC:
                template_obj["metadata"]["annotations"] = {
                    "volume.beta.kubernetes.io/storage-class":
                    "ocs-storagecluster-ceph-rbd"
                }

        tmp_dict["parameters"][4]["value"] = "10Gi"
        java_opts_param = {
            "description": "Override jenkins options to speed up slave spawning",
            "displayName": "Override jenkins options to speed up slave spawning",
            "name": "JAVA_OPTS",
            "value": "-Dhudson.slaves.NodeProvisioner.initialDelay=0 "
                     "-Dhudson.slaves.NodeProvisioner.MARGIN=50 -Dhudson."
                     "slaves.NodeProvisioner.MARGIN0=0.85",
        }
        tmp_dict["parameters"].append(java_opts_param)
        OCS(**tmp_dict).create()
Пример #7
0
    def create_ocs_jenkins_template(self):
        """
        Create OCS Jenkins Template

        Clones the stock 'jenkins-persistent' template, points its PVC at
        the ocs-storagecluster-ceph-rbd storage class, wires JAVA_OPTS into
        the DeploymentConfig, enlarges the volume and registers the
        JAVA_OPTS template parameter.
        """
        log.info("Create Jenkins Template, jenkins-persistent-ocs")
        ocp_obj = OCP(namespace='openshift', kind='template')
        tmp_dict = ocp_obj.get(
            resource_name='jenkins-persistent', out_yaml_format=True
        )
        tmp_dict['labels']['app'] = 'jenkins-persistent-ocs'
        tmp_dict['labels']['template'] = 'jenkins-persistent-ocs-template'
        tmp_dict['metadata']['name'] = 'jenkins-persistent-ocs'
        # BUGFIX: locate objects by kind instead of the hard-coded indices
        # objects[1]/objects[2] -- the positions differ between OCP
        # versions (4.5 vs 4.6), as the sibling implementation notes.
        for template_obj in tmp_dict['objects']:
            if template_obj['kind'] == constants.PVC:
                template_obj['metadata']['annotations'] = {
                    'volume.beta.kubernetes.io/storage-class': 'ocs-storagecluster-ceph-rbd'
                }
            elif template_obj['kind'] == 'DeploymentConfig':
                template_obj['spec']['template']['spec']['containers'][0]['env'].append(
                    {'name': 'JAVA_OPTS', 'value': '${JAVA_OPTS}'})
        tmp_dict['parameters'][4]['value'] = '10Gi'
        tmp_dict['parameters'].append({
            'description': "Override jenkins options to speed up slave spawning",
            'displayName': 'Override jenkins options to speed up slave spawning',
            'name': 'JAVA_OPTS',
            'value': "-Dhudson.slaves.NodeProvisioner.initialDelay=0 "
                     "-Dhudson.slaves.NodeProvisioner.MARGIN=50 -Dhudson."
                     "slaves.NodeProvisioner.MARGIN0=0.85"
        })
        ocs_jenkins_template_obj = OCS(**tmp_dict)
        ocs_jenkins_template_obj.create()
Пример #8
0
def create_cephfilesystem():
    """
    Function for deploying CephFileSystem (MDS)

    Returns:
        bool: True if CephFileSystem creates successful
    """
    fs_data = templating.load_yaml_to_dict(constants.CEPHFILESYSTEM_YAML)
    fs_data['metadata']['name'] = create_unique_resource_name(
        'test', 'cephfs'
    )
    fs_data['metadata']['namespace'] = config.ENV_DATA['cluster_namespace']
    global CEPHFS_OBJ
    CEPHFS_OBJ = OCS(**fs_data)
    CEPHFS_OBJ.create()
    all_pods = pod.get_all_pods(
        namespace=defaults.ROOK_CLUSTER_NAMESPACE
    )
    # Wait once for the MDS pods to be Running. The original repeated an
    # identical wait (same condition and selector) for every matching pod;
    # a single wait gives the same guarantee.
    for mds_pod in all_pods:
        if 'rook-ceph-mds' in mds_pod.labels.values():
            assert mds_pod.ocp.wait_for_resource(
                condition=constants.STATUS_RUNNING,
                selector='app=rook-ceph-mds'
            )
            break
    assert validate_cephfilesystem(fs_name=fs_data['metadata']['name'])
    return True
Пример #9
0
    def create_pgbench_benchmark(self,
                                 replicas,
                                 clients=None,
                                 threads=None,
                                 transactions=None,
                                 scaling_factor=None,
                                 timeout=None):
        """
        Create pgbench benchmark pods

        Args:
            replicas (int): Number of pgbench pods to be deployed
            clients (int): Number of clients
            threads (int): Number of threads
            transactions (int): Number of transactions
            scaling_factor (int): scaling factor
            timeout (int): Time in seconds to wait for pods (default 300)

        Returns:
            List: pgbench pod objects list

        """
        pg_obj_list = []
        for i in range(replicas):
            log.info("Create resource file for pgbench workload")
            pg_data = templating.load_yaml(constants.PGSQL_BENCHMARK_YAML)
            pg_data['metadata']['name'] = f'pgbench-benchmark{i}'
            pg_data['spec']['workload']['args']['databases'][0][
                'host'] = f'postgres-{i}.postgres'

            # Only override workload args the caller actually supplied
            if clients is not None:
                pg_data['spec']['workload']['args']['clients'][0] = clients
            if threads is not None:
                pg_data['spec']['workload']['args']['threads'] = threads
            if transactions is not None:
                pg_data['spec']['workload']['args'][
                    'transactions'] = transactions
            if scaling_factor is not None:
                pg_data['spec']['workload']['args'][
                    'scaling_factor'] = scaling_factor
            pg_obj = OCS(**pg_data)
            pg_obj_list.append(pg_obj)
            pg_obj.create()

        # Confirm that expected pgbench pods are spinned
        log.info("Checking if Getting pgbench pods name")
        timeout = timeout if timeout else 300
        # BUGFIX: the original passed ``replicas`` as the TimeoutSampler
        # *sleep* interval; poll at a fixed cadence instead.
        for pgbench_pods in TimeoutSampler(timeout, 5,
                                           get_pod_name_by_pattern,
                                           'pgbench-1-dbs-client',
                                           RIPSAW_NAMESPACE):
            if len(pgbench_pods) == replicas:
                log.info(f"Expected number of pgbench pods are "
                         f"found: {replicas}")
                break
            # BUGFIX: the original guarded this with a dead
            # ``except IndexError`` -- len() never raises it, so the
            # shortfall was never logged.
            log.info(f'Expected number of pgbench pods are {replicas} '
                     f'but only found {len(pgbench_pods)}')
        return pg_obj_list
Пример #10
0
    def test_fio_workload_simple(self, ripsaw, interface, io_pattern):
        """
        Basic fio performance test: deploy the ripsaw operator, run one fio
        benchmark CR against the interface-appropriate storage class, wait
        for completion and feed the results to regression analysis.
        """
        # Deployment ripsaw
        log.info("Deploying ripsaw operator")
        ripsaw.apply_crd(
            'resources/crds/'
            'ripsaw_v1alpha1_ripsaw_crd.yaml'
        )
        sc = 'ocs-storagecluster-ceph-rbd' if interface == 'CephBlockPool' else 'ocs-storagecluster-cephfs'

        # Create fio benchmark
        log.info("Create resource file for fio workload")
        fio_cr = templating.load_yaml(constants.FIO_CR_YAML)
        # Todo: have pvc_size set to 'get_osd_pods_memory_sum * 5'
        #  once pr-2037 is merged
        fio_cr['spec']['clustername'] = config.ENV_DATA['platform'] + get_build() + get_ocs_version()
        fio_cr['spec']['test_user'] = get_ocs_version() + interface + io_pattern
        fio_cr['spec']['workload']['args']['storageclass'] = sc
        if io_pattern == 'sequential':
            fio_cr['spec']['workload']['args']['jobs'] = ['write', 'read']
        log.info(f'fio_cr: {fio_cr}')
        fio_cr_obj = OCS(**fio_cr)
        fio_cr_obj.create()

        # Wait for fio client pod to be created
        for fio_pod in TimeoutSampler(
            300, 20, get_pod_name_by_pattern, 'fio-client', 'my-ripsaw'
        ):
            try:
                if fio_pod[0] is not None:
                    fio_client_pod = fio_pod[0]
                    break
            except IndexError:
                log.info("Bench pod not ready yet")

        # Wait for fio pod to initialized and complete
        log.info("Waiting for fio_client to complete")
        pod_obj = OCP(kind='pod')
        pod_obj.wait_for_resource(
            condition='Completed',
            resource_name=fio_client_pod,
            timeout=18000,
            sleep=300,
        )

        output = run_cmd(f'oc logs {fio_client_pod}')

        # BUGFIX: the original wrapped this check in ``try/except IOError``,
        # but a substring test never raises IOError, so the failure branch
        # was unreachable. Use a plain conditional instead.
        if 'Fio failed to execute' in output:
            log.info("FIO failed to complete")
        else:
            log.info("FIO has completed successfully")

        # Clean up fio benchmark
        log.info("Deleting FIO benchmark")
        fio_cr_obj.delete()
        analyze_regression(io_pattern, sc, es_username=fio_cr['spec']['test_user'])
Пример #11
0
def setup_ceph_toolbox(force_setup=False):
    """
    Setup ceph-toolbox - also checks if toolbox exists, if it exists it
    behaves as noop.

    Three deployment paths exist:
      * external mode: customized toolbox pod with the admin keyring
        injected via env var and an emptyDir mounted at /etc/ceph
      * OCS 4.2: toolbox pod created manually from the rook operator image
      * OCS >= 4.3: enabled declaratively through ocsinitialization

    Args:
        force_setup (bool): force setup toolbox pod

    """
    namespace = ocsci_config.ENV_DATA["cluster_namespace"]
    ceph_toolbox = get_pod_name_by_pattern("rook-ceph-tools", namespace)
    # setup toolbox for external mode
    # Refer bz: 1856982 - invalid admin secret
    if len(ceph_toolbox) == 1:
        log.info("Ceph toolbox already exists, skipping")
        if force_setup:
            log.info("Running force setup for Ceph toolbox!")
        else:
            return
    external_mode = ocsci_config.DEPLOYMENT.get("external_mode")

    if ocsci_config.ENV_DATA.get("ocs_version") == "4.2":
        # OCS 4.2 has no operator-managed toolbox: reuse the image of the
        # running rook-ceph-operator pod for the toolbox pod.
        rook_operator = get_pod_name_by_pattern("rook-ceph-operator",
                                                namespace)
        out = run_cmd(
            f"oc -n {namespace} get pods {rook_operator[0]} -o yaml", )
        version = yaml.safe_load(out)
        rook_version = version["spec"]["containers"][0]["image"]
        tool_box_data = templating.load_yaml(constants.TOOL_POD_YAML)
        tool_box_data["spec"]["template"]["spec"]["containers"][0][
            "image"] = rook_version
        rook_toolbox = OCS(**tool_box_data)
        rook_toolbox.create()
    else:
        if external_mode:
            toolbox = templating.load_yaml(constants.TOOL_POD_YAML)
            toolbox["metadata"]["name"] += "-external"
            # inject the external cluster's admin keyring into the pod env
            keyring_dict = ocsci_config.EXTERNAL_MODE.get("admin_keyring")
            env = [{"name": "ROOK_ADMIN_SECRET", "value": keyring_dict["key"]}]
            toolbox["spec"]["template"]["spec"]["containers"][0]["env"] = env
            # add ceph volumeMounts
            ceph_volume_mount_path = {
                "mountPath": "/etc/ceph",
                "name": "ceph-config"
            }
            ceph_volume = {"name": "ceph-config", "emptyDir": {}}
            toolbox["spec"]["template"]["spec"]["containers"][0][
                "volumeMounts"].append(ceph_volume_mount_path)
            toolbox["spec"]["template"]["spec"]["volumes"].append(ceph_volume)
            rook_toolbox = OCS(**toolbox)
            rook_toolbox.create()
            return
        # for OCS >= 4.3 there is new toolbox pod deployment done here:
        # https://github.com/openshift/ocs-operator/pull/207/
        log.info("starting ceph toolbox pod")
        run_cmd(
            "oc patch ocsinitialization ocsinit -n openshift-storage --type "
            'json --patch  \'[{ "op": "replace", "path": '
            '"/spec/enableCephTools", "value": true }]\'')
Пример #12
0
    def setup_amq_cluster_operator(self, namespace=constants.AMQ_NAMESPACE):
        """
        Function to setup amq-cluster_operator,
        the file is pulling from github
        it will make sure cluster-operator pod is running

        Args:
            namespace (str): Namespace for AMQ pods

        Raises:
            ResourceWrongStatusException: if the cluster-operator pod does
                not reach the Running state.
        """

        # Namespace for amq; tolerate a pre-existing project
        try:
            self.create_namespace(namespace)
        except CommandFailed as ef:
            if f'project.project.openshift.io "{namespace}" already exists' not in str(
                ef
            ):
                raise ef

        # Create strimzi-cluster-operator pod
        # NOTE(review): this sed replaces {namespace} with 'myproject',
        # which looks inverted -- confirm the intended substitution.
        run(
            f"for i in `(ls strimzi-kafka-operator/packaging/install/cluster-operator/)`;"
            f"do sed 's/{namespace}/myproject/g' "
            f"strimzi-kafka-operator/packaging/install/cluster-operator/$i;done",
            shell=True,
            check=True,
            cwd=self.dir,
        )
        self.strimzi_kafka_operator = os.path.join(self.dir, self.amq_dir)
        # Apply every manifest found in the operator install directory
        self.crd_objects = []
        for adm_yaml in os.listdir(self.strimzi_kafka_operator):
            try:
                # os.path.join works regardless of a trailing separator in
                # amq_dir (the original string concatenation did not)
                adm_data = templating.load_yaml(
                    os.path.join(self.strimzi_kafka_operator, adm_yaml)
                )
                adm_obj = OCS(**adm_data)
                adm_obj.create()
                self.crd_objects.append(adm_obj)
            except (CommandFailed, CalledProcessError) as cfe:
                if "Error is Error from server (AlreadyExists):" in str(cfe):
                    # log.warn is a deprecated alias of log.warning
                    log.warning(
                        "Some amq leftovers are present, please cleanup the cluster"
                    )
                    pytest.skip(
                        "AMQ leftovers are present needs to cleanup the cluster"
                    )
                else:
                    # The original silently swallowed other failures; at
                    # least record them for debugging.
                    log.warning(f"Failed to create {adm_yaml}: {cfe}")
        time.sleep(30)
        #  Check strimzi-cluster-operator pod created
        if self.is_amq_pod_running(pod_pattern="cluster-operator", expected_pods=1):
            log.info("strimzi-cluster-operator pod is in running state")
        else:
            raise ResourceWrongStatusException(
                "strimzi-cluster-operator pod is not getting to running state"
            )
Пример #13
0
def setup_ceph_toolbox(force_setup=False):
    """
    Setup ceph-toolbox - also checks if toolbox exists, if it exists it
    behaves as noop.

    Three deployment paths exist:
      * independent mode: toolbox pod with the admin keyring injected
        via the ROOK_ADMIN_SECRET env var
      * OCS 4.2: toolbox pod created manually from the rook operator image
      * OCS >= 4.3: enabled declaratively through ocsinitialization

    Args:
        force_setup (bool): force setup toolbox pod

    """
    namespace = ocsci_config.ENV_DATA['cluster_namespace']
    ceph_toolbox = get_pod_name_by_pattern('rook-ceph-tools', namespace)
    # setup toolbox for independent mode
    # Refer bz: 1856982 - invalid admin secret
    if len(ceph_toolbox) == 1:
        log.info("Ceph toolbox already exists, skipping")
        if force_setup:
            log.info("Running force setup for Ceph toolbox!")
        else:
            return
    independent_mode = ocsci_config.DEPLOYMENT.get("independent_mode")
    if independent_mode:
        # inject the external cluster's admin keyring into the pod env
        toolbox = templating.load_yaml(constants.TOOL_POD_YAML)
        keyring_dict = ocsci_config.INDEPENDENT_MODE.get("admin_keyring")
        env = [{'name': 'ROOK_ADMIN_SECRET', 'value': keyring_dict['key']}]
        toolbox['spec']['template']['spec']['containers'][0]['env'] = env
        rook_toolbox = OCS(**toolbox)
        rook_toolbox.create()
    elif ocsci_config.ENV_DATA.get("ocs_version") == '4.2':
        # OCS 4.2 has no operator-managed toolbox: reuse the image of the
        # running rook-ceph-operator pod for the toolbox pod.
        rook_operator = get_pod_name_by_pattern(
            'rook-ceph-operator', namespace
        )
        out = run_cmd(
            f'oc -n {namespace} get pods {rook_operator[0]} -o yaml',
        )
        version = yaml.safe_load(out)
        rook_version = version['spec']['containers'][0]['image']
        tool_box_data = templating.load_yaml(constants.TOOL_POD_YAML)
        tool_box_data['spec']['template']['spec']['containers'][0][
            'image'
        ] = rook_version
        rook_toolbox = OCS(**tool_box_data)
        rook_toolbox.create()
    else:
        # for OCS >= 4.3 there is new toolbox pod deployment done here:
        # https://github.com/openshift/ocs-operator/pull/207/
        log.info("starting ceph toolbox pod")
        run_cmd(
            'oc patch ocsinitialization ocsinit -n openshift-storage --type '
            'json --patch  \'[{ "op": "replace", "path": '
            '"/spec/enableCephTools", "value": true }]\''
        )
Пример #14
0
def setup_ceph_toolbox():
    """
    Deploy the ceph toolbox pod, reusing the image of the running
    rook-ceph-operator pod.
    """
    namespace = ocsci_config.ENV_DATA['cluster_namespace']
    operator_pods = get_pod_name_by_pattern('rook-ceph-operator', namespace)
    # Read the operator pod spec to discover which rook image to run
    operator_dump = run_cmd(
        f'oc -n {namespace} get pods {operator_pods[0]} -o yaml')
    operator_pod = yaml.safe_load(operator_dump)
    operator_image = operator_pod['spec']['containers'][0]['image']
    toolbox_spec = templating.load_yaml(constants.TOOL_POD_YAML)
    toolbox_spec['spec']['template']['spec']['containers'][0][
        'image'] = operator_image
    OCS(**toolbox_spec).create()
Пример #15
0
    def test_sql_workload_simple(self, ripsaw):
        """
        Basic pgsql workload: deploy postgres via ripsaw, run a pgbench
        benchmark pod to completion and validate its latency output.
        """
        log.info("Deploying postgres database")
        ripsaw.apply_crd('resources/crds/ripsaw_v1alpha1_ripsaw_crd.yaml')
        ripsaw.setup_postgresql()

        # Create the pgbench benchmark CR
        log.info("Create resource file for pgbench workload")
        benchmark_data = templating.load_yaml(constants.PGSQL_BENCHMARK_YAML)
        benchmark = OCS(**benchmark_data)
        benchmark.create()

        # Poll until the pgbench client pod appears
        pgbench_client_pod = None
        for candidates in TimeoutSampler(300, 3, get_pod_name_by_pattern,
                                         'pgbench-1-dbs-client', 'my-ripsaw'):
            try:
                if candidates[0] is not None:
                    pgbench_client_pod = candidates[0]
                    break
            except IndexError:
                log.info("Bench pod not ready yet")

        # Wait for the pgbench pod to finish its run
        log.info("Waiting for pgbench_client to complete")
        OCP(kind='pod').wait_for_resource(
            condition='Completed',
            resource_name=pgbench_client_pod,
            timeout=800,
            sleep=10,
        )

        # Parse the benchmark logs; missing latency data means failure
        pg_output = utils.parse_pgsql_logs(
            run_cmd(f'oc logs {pgbench_client_pod}'))
        log.info("*******PGBench output log*********\n" f"{pg_output}")
        for sample in pg_output:
            if not sample['latency_avg']:
                raise UnexpectedBehaviour("PGBench failed to run, "
                                          "no data found on latency_avg")
        log.info("PGBench has completed successfully")

        # Clean up pgbench benchmark
        log.info("Deleting PG bench benchmark")
        benchmark.delete()
Пример #16
0
    def create_ocs_jenkins_template(self):
        """
        Create OCS Jenkins Template

        Clones the stock 'jenkins-persistent' template into
        'jenkins-persistent-ocs', points its PVC at the appropriate RBD
        storage class (external-mode aware), enlarges the volume, adds a
        JAVA_OPTS template parameter, and on OCP >= 4.8 pins the Jenkins
        plugin list.
        """
        log.info("Create Jenkins Template, jenkins-persistent-ocs")
        ocp_obj = OCP(namespace="openshift", kind="template")
        tmp_dict = ocp_obj.get(resource_name="jenkins-persistent",
                               out_yaml_format=True)
        tmp_dict["labels"]["app"] = "jenkins-persistent-ocs"
        tmp_dict["labels"]["template"] = "jenkins-persistent-ocs-template"
        tmp_dict["metadata"]["name"] = "jenkins-persistent-ocs"
        # Pick the RBD storage class matching the deployment mode
        sc_name = (constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_RBD
                   if storagecluster_independent_check() else
                   constants.DEFAULT_STORAGECLASS_RBD)
        # Find Kind: 'PersistentVolumeClaim' position in the objects list, differs in OCP 4.5 and OCP 4.6.
        for i in range(len(tmp_dict["objects"])):
            if tmp_dict["objects"][i]["kind"] == constants.PVC:
                tmp_dict["objects"][i]["metadata"]["annotations"] = {
                    "volume.beta.kubernetes.io/storage-class": sc_name
                }

        # parameters[4] is assumed to be the volume-capacity parameter --
        # TODO confirm against the live template.
        tmp_dict["parameters"][4]["value"] = "10Gi"
        tmp_dict["parameters"].append({
            "description":
            "Override jenkins options to speed up slave spawning",
            "displayName":
            "Override jenkins options to speed up slave spawning",
            "name":
            "JAVA_OPTS",
            "value":
            "-Dhudson.slaves.NodeProvisioner.initialDelay=0 "
            "-Dhudson.slaves.NodeProvisioner.MARGIN=50 -Dhudson."
            "slaves.NodeProvisioner.MARGIN0=0.85",
        })
        if Version.coerce(self.ocp_version) >= Version.coerce("4.8"):
            # Added "Pipeline Utility Steps" plugin via Jenkins Template
            # OCP team changed the default plugin list on OCP4.9
            # NOTE(review): objects[3] is assumed to be the DeploymentConfig
            # here, unlike the kind-based PVC lookup above -- verify.
            tmp_dict["objects"][3]["spec"]["template"]["spec"]["containers"][
                0]["env"].append({
                    "name":
                    "INSTALL_PLUGINS",
                    "value":
                    "scm-api:2.6.5,pipeline-utility-steps:2.12.0,workflow-step-api:622."
                    "vb_8e7c15b_c95a_,workflow-cps:2648.va9433432b33c,workflow-api:2.47",
                })
        ocs_jenkins_template_obj = OCS(**tmp_dict)
        ocs_jenkins_template_obj.create()
Пример #17
0
    def test_sql_workload_simple(self, ripsaw):
        """
        Basic pgsql workload: deploy postgres via ripsaw, run one pgbench
        benchmark job to completion (driven through ``oc wait`` commands)
        and validate its latency output.
        """
        # Deployment postgres
        log.info("Deploying postgres database")
        ripsaw.apply_crd('resources/crds/' 'ripsaw_v1alpha1_ripsaw_crd.yaml')
        ripsaw.setup_postgresql()
        run_cmd('bin/oc wait --for condition=ready pod '
                '-l app=postgres '
                '--timeout=120s')

        # Create pgbench benchmark
        log.info("Create resource file for pgbench workload")
        pg_data = templating.load_yaml_to_dict(constants.PGSQL_BENCHMARK_YAML)
        pg_obj = OCS(**pg_data)
        pg_obj.create()
        # Wait for pgbench pod to be created
        log.info("waiting for pgbench benchmark to create, "
                 f"PGbench pod name: {pg_obj.name} ")
        # Fixed grace period before querying for the client pod --
        # presumably enough for the operator to schedule it; TODO confirm.
        wait_time = 30
        log.info(f"Waiting {wait_time} seconds...")
        time.sleep(wait_time)

        # Resolve the client pod name ('pod/<name>' -> '<name>')
        pgbench_pod = run_cmd('bin/oc get pods -l '
                              'app=pgbench-client -o name')
        pgbench_pod = pgbench_pod.split('/')[1]
        run_cmd('bin/oc wait --for condition=Initialized '
                f'pods/{pgbench_pod} '
                '--timeout=60s')
        run_cmd('bin/oc wait --for condition=Complete jobs '
                '-l app=pgbench-client '
                '--timeout=300s')

        # Running pgbench and parsing logs
        output = run_cmd(f'bin/oc logs {pgbench_pod}')
        pg_output = utils.parse_pgsql_logs(output)
        log.info("*******PGBench output log*********\n" f"{pg_output}")
        # Every parsed sample must carry latency data, else the run failed
        for data in pg_output:
            latency_avg = data['latency_avg']
            if not latency_avg:
                raise UnexpectedBehaviour("PGBench failed to run, "
                                          "no data found on latency_avg")
        log.info("PGBench has completed successfully")

        # Clean up pgbench benchmark
        log.info("Deleting PG bench benchmark:")
        pg_obj.delete()
Пример #18
0
    def test_verify_all_fields_in_sc_yaml_with_oc_describe(self, interface):
        """
        Test function to create RBD and CephFS SC, and match with oc describe sc
        output
        """
        log.info(f"Creating a {interface} storage class")
        self.sc_data = templating.load_yaml(
            getattr(constants, f"CSI_{interface}_STORAGECLASS_YAML"))
        self.sc_data['metadata']['name'] = (
            helpers.create_unique_resource_name('test',
                                                f'csi-{interface.lower()}'))
        # SC_OBJ is module-global so helpers/teardown elsewhere can see it;
        # it is removed again via ``del`` at the end of this test.
        global SC_OBJ
        SC_OBJ = OCS(**self.sc_data)
        assert SC_OBJ.create()
        log.info(
            f"{interface}Storage class: {SC_OBJ.name} created successfully")
        log.info(self.sc_data)

        # Get oc describe sc output
        describe_out = SC_OBJ.get("sc")
        log.info(describe_out)

        # Confirm that sc yaml details matches oc describe sc output
        # Keys present server-side but absent from the local yaml; only the
        # cluster-defaulted 'volumeBindingMode' is expected to differ.
        value = {
            k: describe_out[k]
            for k in set(describe_out) - set(self.sc_data)
        }
        assert len(value) == 1 and value['volumeBindingMode'] == 'Immediate', (
            "OC describe sc output didn't match storage class yaml")
        log.info("OC describe sc output matches storage class yaml")
        # Delete Storage Class
        log.info(f"Deleting Storageclass: {SC_OBJ.name}")
        assert SC_OBJ.delete()
        log.info(f"Storage Class: {SC_OBJ.name} deleted successfully")
        del SC_OBJ
Пример #19
0
def invalid_storageclass(request):
    """
    Creates a CephFS or RBD StorageClass with invalid parameters.

    Storageclass is removed at the end of test.

    Returns:
        str: Name of created StorageClass
    """
    sc_name = request.param['values']['storageclass_name']
    logger.info(f"SETUP - creating storageclass {sc_name}")
    # Load the template and overlay the (invalid) parametrized values
    yaml_path = os.path.join(request.param['template_dir'],
                             "storageclass.yaml")
    with open(yaml_path, 'r') as fd:
        yaml_data = yaml.safe_load(fd)
    yaml_data.update(request.param['values'])
    storageclass = OCS(**yaml_data)
    sc_data = storageclass.create()

    logger.debug('Check that storageclass has assigned creationTimestamp')
    assert sc_data['metadata']['creationTimestamp']

    yield sc_data

    logger.info(f"TEARDOWN - removing storageclass {sc_name}")
    storageclass.delete()
Пример #20
0
def increase_pods_per_worker_node_count(pods_per_node=500, pods_per_core=10):
    """
    Raise the pods-per-node limit on worker nodes.

    OCP defaults to 250 pods per node (500 from OCP 4.6); this helper
    overrides that limit via a KubeletConfig so more pods can be scheduled
    per worker.
    More detail: https://docs.openshift.com/container-platform/4.5/nodes/nodes/nodes-nodes-managing-max-pods.html

    Example: The default value for podsPerCore is 10 and the default value for maxPods is 250.
    This means that unless the node has 25 cores or more, by default, podsPerCore will be the limiting factor.

    WARN: Applying the change cordons and reboots the workers, so any
    non-dc pods are expected to be terminated.

    Args:
        pods_per_node (int): Pods per node limit count
        pods_per_core (int): Pods per core limit count

    Raise:
        UnexpectedBehaviour if machineconfigpool not in Updating state within 40secs.

    """
    # Render the KubeletConfig template with the requested limits.
    kubelet_config = templating.load_yaml(constants.PODS_PER_NODE_COUNT_YAML)
    kubelet_config["spec"]["kubeletConfig"]["podsPerCore"] = pods_per_core
    kubelet_config["spec"]["kubeletConfig"]["maxPods"] = pods_per_node

    # Create the new max-pods KubeletConfig resource
    kubelet_config_obj = OCS(**kubelet_config)
    assert kubelet_config_obj.create()

    # Label the worker machineconfigpool so the config applies to it
    label_cmd = "label machineconfigpool worker custom-kubelet=small-pods"
    ocp = OCP()
    assert ocp.exec_oc_cmd(command=label_cmd)

    # Poll until the pool's Updating condition flips to True (it starts
    # False with machine_count == ready_machine_count). 9 polls, 5s apart.
    get_cmd = "get machineconfigpools -o yaml"
    for attempt in range(9):
        output = ocp.exec_oc_cmd(command=get_cmd)
        update_status = (output.get("items")[1].get("status").get("conditions")
                         [4].get("status"))
        if update_status == "True":
            break
        if attempt >= 8:
            raise UnexpectedBehaviour(
                "After 40sec machineconfigpool not in Updating state")
        logging.info("Sleep 5secs for updating status change")
        time.sleep(5)

    # Validate either change is successful
    output = ocp.exec_oc_cmd(command=get_cmd)
    machine_count = output.get("items")[1].get("status").get("machineCount")
    # During manual execution observed each node took 240+ sec for update
    timeout = machine_count * 300
    utils.wait_for_machineconfigpool_status(node_type=constants.WORKER_MACHINE,
                                            timeout=timeout)
Пример #21
0
    def create_jenkins_build_config(self):
        """
        Create a Jenkins BuildConfig in each project of this instance.

        Raises:
            CommandFailed / CalledProcessError: re-raised if any
                BuildConfig creation fails.
        """
        for project in self.projects:
            try:
                log.info(f'create build config on {project}')
                build_config = templating.load_yaml(
                    constants.JENKINS_BUILDCONFIG_YAML)
                # Point the templated BuildConfig at the current project
                build_config['metadata']['namespace'] = project
                OCS(**build_config).create()
            except (CommandFailed, CalledProcessError) as cf:
                log.error('Failed to create Jenkins build config')
                raise cf
Пример #22
0
def create_pvc_snapshot(pvc_name,
                        snap_yaml,
                        snap_name,
                        sc_name=None,
                        wait=False):
    """
    Create snapshot of a PVC

    Args:
        pvc_name (str): Name of the PVC
        snap_yaml (str): The path of snapshot yaml
        snap_name (str): The name of the snapshot to be created
        sc_name (str): The name of the snapshot class
        wait (bool): True to wait for snapshot to be ready, False otherwise

    Returns:
        OCS object
    """
    snap_data = templating.load_yaml(snap_yaml)
    snap_data['metadata']['name'] = snap_name
    snap_data['spec']['source']['persistentVolumeClaimName'] = pvc_name
    # The snapshot class is optional; only set it when given
    if sc_name:
        snap_data['spec']['volumeSnapshotClassName'] = sc_name
    snap_obj = OCS(**snap_data)
    assert snap_obj.create(do_reload=True), (
        f"Failed to create snapshot {snap_name}")
    if wait:
        # Block until the snapshot reports READYTOUSE=true (60s cap)
        snap_obj.ocp.wait_for_resource(condition='true',
                                       resource_name=snap_obj.name,
                                       column=constants.STATUS_READYTOUSE,
                                       timeout=60)
    return snap_obj
Пример #23
0
def create_resource(desired_status=constants.STATUS_AVAILABLE,
                    wait=True,
                    **kwargs):
    """
    Create a resource

    Args:
        desired_status (str): The status of the resource to wait for
        wait (bool): True for waiting for the resource to reach the desired
            status, False otherwise
        kwargs (dict): Dictionary of the OCS resource

    Returns:
        OCS: An OCS instance

    Raises:
        AssertionError: In case of any failure
    """
    ocs_obj = OCS(**kwargs)
    name = kwargs.get('metadata').get('name')
    assert ocs_obj.create(do_reload=wait), (
        f"Failed to create resource {name}")
    if not wait:
        return ocs_obj
    # Only reached when wait=True: block until the desired state is seen
    assert wait_for_resource_state(resource=name,
                                   state=desired_status)
    return ocs_obj
def setup_fs(self):
    """
    Setting up the environment for the test: creates a uniquely named
    CephFilesystem in the cluster namespace and waits for its MDS pods.
    """
    global CEPH_OBJ
    fs_data = templating.load_yaml_to_dict(constants.CEPHFILESYSTEM_YAML)
    fs_data['metadata']['name'] = helpers.create_unique_resource_name(
        'test', 'cephfs')
    fs_data['metadata']['namespace'] = config.ENV_DATA['cluster_namespace']
    self.fs_data = fs_data
    CEPH_OBJ = OCS(**fs_data)
    CEPH_OBJ.create()
    assert POD.wait_for_resource(condition='Running',
                                 selector='app=rook-ceph-mds')
    # Exactly two MDS pods are expected for the filesystem
    mds_pods = POD.get(selector='app=rook-ceph-mds')['items']
    assert len(mds_pods) == 2
Пример #25
0
    def run_pillowfights(self):
        """
        loop through all the yaml files extracted from the pillowfight repo
        and run them.  Run oc logs on the results and save the logs in self.logs
        directory

        Raises:
            Exception: if a pillowfight pod terminates with reason 'Error'.
        """
        ocp_local = OCP(namespace=self.namespace)
        # Only regular *.yaml files in the pillowfight template dir are run
        pf_files = listdir(constants.TEMPLATE_PILLOWFIGHT_DIR)
        for pf_yaml in pf_files:
            pf_fullpath = join(constants.TEMPLATE_PILLOWFIGHT_DIR, pf_yaml)
            if not pf_fullpath.endswith('.yaml'):
                continue
            if not isfile(pf_fullpath):
                continue
            pfight = templating.load_yaml(pf_fullpath)
            lpillowfight = OCS(**pfight)
            lpillowfight.create()
            pf_completion_info = ''
            pf_pod = ''
            # Poll (every 3s, up to self.WAIT_FOR_TIME) until the pillowfight
            # pod's first container reaches a 'terminated' state; IndexError
            # means the pod has not appeared yet, so keep sampling.
            for pillowfight_pod in TimeoutSampler(self.WAIT_FOR_TIME, 3,
                                                  get_pod_name_by_pattern,
                                                  'pillowfight',
                                                  self.COUCHBASE_OPERATOR):
                try:
                    pf_pod = pillowfight_pod[0]
                    pod_info = self.up_check.exec_oc_cmd(
                        f"get pods {pf_pod} -o json")
                    pf_status = pod_info['status']['containerStatuses'][0][
                        'state']
                    if 'terminated' in pf_status:
                        pf_completion_info = pf_status['terminated']['reason']
                        break
                except IndexError:
                    log.info(f"Pillowfight {pf_yaml} not yet completed")
            if pf_completion_info == 'Error':
                raise Exception(f"Pillowfight {pf_yaml} failed to complete")
            if pf_completion_info == 'Completed':
                # Save the pod log as <yaml-name-without-extension>.log
                pf_prefix = pf_yaml[0:pf_yaml.find(".")]
                pf_endlog = f'{pf_prefix}.log'
                pf_log = join(self.logs, pf_endlog)
                data_from_log = ocp_local.exec_oc_cmd(
                    f"logs -f {pf_pod} --ignore-errors", out_yaml_format=False)
                # Strip NUL bytes so the log writes cleanly as text
                data_from_log = data_from_log.replace('\x00', '')
                with open(pf_log, 'w') as fd:
                    fd.write(data_from_log)
Пример #26
0
def setup_ceph_toolbox():
    """
    Setup ceph-toolbox - also checks if toolbox exists, if it exists it
    behaves as noop.
    """
    namespace = ocsci_config.ENV_DATA['cluster_namespace']
    if len(get_pod_name_by_pattern('rook-ceph-tools', namespace)) == 1:
        log.info("Ceph toolbox already exists, skipping")
        return
    # Reuse the rook operator's image for the toolbox pod so versions match
    rook_operator = get_pod_name_by_pattern('rook-ceph-operator', namespace)
    operator_yaml = run_cmd(
        f'oc -n {namespace} get pods {rook_operator[0]} -o yaml')
    operator_pod = yaml.safe_load(operator_yaml)
    rook_image = operator_pod['spec']['containers'][0]['image']
    tool_box_data = templating.load_yaml(constants.TOOL_POD_YAML)
    tool_box_data['spec']['template']['spec']['containers'][0][
        'image'] = rook_image
    OCS(**tool_box_data).create()
Пример #27
0
    def test_smallfile_workload(self, ripsaw):
        """
        Run SmallFile Workload.

        Applies the ripsaw CRD, creates the smallfile benchmark resource,
        waits for the client pod to start, then polls its logs until the
        benchmark reports completion.

        Args:
            ripsaw: ripsaw fixture used to apply the operator CRD

        Raises:
            TimeoutError: if the benchmark does not finish within 900s.
        """
        log.info("Apply Operator CRD")
        ripsaw.apply_crd('resources/crds/ripsaw_v1alpha1_ripsaw_crd.yaml')

        log.info("Running SmallFile bench")
        sf_data = templating.load_yaml_to_dict(
            constants.SMALLFILE_BENCHMARK_YAML)
        sf_obj = OCS(**sf_data)
        sf_obj.create()
        # wait for benchmark pods to get created - takes a while.
        # IndexError means no matching pod has appeared yet; keep sampling.
        for bench_pod in TimeoutSampler(40, 3, get_pod_name_by_pattern,
                                        'smallfile-client', 'my-ripsaw'):
            try:
                if bench_pod[0] is not None:
                    small_file_client_pod = bench_pod[0]
                    break
            except IndexError:
                log.info("Bench pod not ready yet")

        bench_pod = OCP(kind='pod', namespace='my-ripsaw')
        log.info("Waiting for SmallFile benchmark to Run")
        assert bench_pod.wait_for_resource(condition=constants.STATUS_RUNNING,
                                           resource_name=small_file_client_pod,
                                           sleep=30,
                                           timeout=600)
        start_time = time.time()
        timeout = 900
        # Poll the client pod logs every 30s for the completion marker
        while True:
            logs = bench_pod.exec_oc_cmd(f'logs {small_file_client_pod}',
                                         out_yaml_format=False)
            if "RUN STATUS DONE" in logs:
                log.info("SmallFile Benchmark Completed Successfully")
                break

            if timeout < (time.time() - start_time):
                # fix: was an f-string with no placeholders (ruff F541)
                raise TimeoutError(
                    "Timed out waiting for benchmark to complete")
            time.sleep(30)
Пример #28
0
def setup(self):
    """
    Setting up the environment for the test: creates a uniquely named
    CSI RBD storage class and stores it in the module-global SC_OBJ.
    """
    log.info("Creating a Storage Class")
    sc_data = templating.load_yaml(constants.CSI_RBD_STORAGECLASS_YAML)
    sc_data['metadata']['name'] = helpers.create_unique_resource_name(
        'test', 'csi-rbd')
    self.sc_data = sc_data
    global SC_OBJ
    SC_OBJ = OCS(**sc_data)
    assert SC_OBJ.create()
    log.info(f"Storage class: {SC_OBJ.name} created successfully")
    log.debug(sc_data)
Пример #29
0
def create_resource(do_reload=True, **kwargs):
    """
    Create a resource

    Args:
        do_reload (bool): True for reloading the resource following its creation,
            False otherwise
        kwargs (dict): Dictionary of the OCS resource

    Returns:
        OCS: An OCS instance

    Raises:
        AssertionError: In case of any failure
    """
    resource_name = kwargs.get('metadata').get('name')
    ocs_obj = OCS(**kwargs)
    assert ocs_obj.create(do_reload=do_reload), (
        f"Failed to create resource {resource_name}")
    return ocs_obj
Пример #30
0
def create_pvc_snapshot(pvc_name, snap_yaml, snap_name, sc_name):
    """
    Create snapshot of a PVC

    Args:
        pvc_name (str): Name of the PVC
        snap_yaml (str): The path of snapshot yaml
        snap_name (str): The name of the snapshot to be created
        sc_name (str): The name of the snapshot class

    Returns:
        OCS object
    """
    snap_data = templating.load_yaml(snap_yaml)
    snap_data['metadata']['name'] = snap_name
    snap_data['spec']['source']['persistentVolumeClaimName'] = pvc_name
    snap_data['spec']['volumeSnapshotClassName'] = sc_name
    snap_obj = OCS(**snap_data)
    assert snap_obj.create(do_reload=True), (
        f"Failed to create snapshot {snap_name}")
    return snap_obj