Example #1
    def create_quay_registry(self):
        """
        Creates Quay registry

        """
        if not helpers.get_default_storage_class():
            patch = ' \'{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}\' '
            run_cmd(f"oc patch storageclass {self.sc_name} "
                    f"-p {patch} "
                    f"--request-timeout=120s")
            self.sc_default = True
        self.quay_registry_secret_name = create_unique_resource_name(
            "quay-user", "secret")
        logger.info(
            f"Creating Quay registry config for super-user access: {self.quay_registry_secret_name}"
        )
        self.quay_registry_secret = self.ocp_obj.exec_oc_cmd(
            command=f"create secret generic "
            f"--from-file config.yaml={constants.QUAY_SUPER_USER} "
            f"{self.quay_registry_secret_name}")
        quay_registry_data = templating.load_yaml(file=constants.QUAY_REGISTRY)
        self.quay_registry_name = quay_registry_data["metadata"]["name"]
        quay_registry_data["spec"][
            "configBundleSecret"] = self.quay_registry_secret_name
        self.quay_registry = OCS(**quay_registry_data)
        logger.info(f"Creating Quay registry: {self.quay_registry.name}")
        self.quay_registry.create()
        logger.info("Waiting for 15s for registry to get initialized")
        sleep(15)
        self.wait_for_quay_endpoint()
Example #2
    def teardown(self):
        """
        Delete objects created in roughly reverse order of how they were created.

        """
        self.cb_examples.delete()
        self.cb_worker.delete()
        self.cb_deploy.delete()
        self.pod_obj.exec_oc_cmd(
            command="delete rolebinding couchbase-operator-rolebinding")
        self.pod_obj.exec_oc_cmd(
            command="delete serviceaccount couchbase-operator")
        self.operator_role.delete()
        self.couchbase_obj.delete()
        switch_to_project('default')
        self.pod_obj.delete_project(constants.COUCHBASE_OPERATOR)
        for adm_yaml in self.admission_parts:
            adm_data = templating.load_yaml(adm_yaml)
            adm_obj = OCS(**adm_data)
            adm_obj.delete()
        # Without the wait below, this teardown would sometimes fail with
        # leftover objects because it could still see one of the couchbase
        # pods.
        for admin_pod in TimeoutSampler(self.WAIT_FOR_TIME, 3,
                                        get_pod_name_by_pattern, 'couchbase',
                                        'default'):
            if not admin_pod:
                break
        PillowFight.cleanup(self)
        switch_to_default_rook_cluster_project()
Example #3
    def test_verify_all_fields_in_sc_yaml_with_oc_describe(self, interface):
        """
        Test function to create RBD and CephFS SC, and match with oc describe sc
        output
        """
        log.info(f"Creating a {interface} storage class")
        self.sc_data = templating.load_yaml(
            getattr(constants, f"CSI_{interface}_STORAGECLASS_YAML"))
        self.sc_data['metadata']['name'] = (
            helpers.create_unique_resource_name('test',
                                                f'csi-{interface.lower()}'))
        global SC_OBJ
        SC_OBJ = OCS(**self.sc_data)
        assert SC_OBJ.create()
        log.info(
            f"{interface} storage class: {SC_OBJ.name} created successfully")
        log.info(self.sc_data)

        # Get oc describe sc output
        describe_out = SC_OBJ.get("sc")
        log.info(describe_out)

        # Confirm that the sc yaml details match the oc describe sc output
        value = {
            k: describe_out[k]
            for k in set(describe_out) - set(self.sc_data)
        }
        assert len(value) == 1 and value['volumeBindingMode'] == 'Immediate', (
            "OC describe sc output didn't match storage class yaml")
        log.info("OC describe sc output matches storage class yaml")
        # Delete Storage Class
        log.info(f"Deleting Storageclass: {SC_OBJ.name}")
        assert SC_OBJ.delete()
        log.info(f"Storage Class: {SC_OBJ.name} deleted successfully")
        del SC_OBJ
Example #4
def invalid_storageclass(request):
    """
    Creates a CephFS or RBD StorageClass with invalid parameters.

    Storageclass is removed at the end of test.

    Returns:
        str: Name of created StorageClass
    """
    logger.info(f"SETUP - creating storageclass "
                f"{request.param['values']['storageclass_name']}")
    yaml_path = os.path.join(request.param['template_dir'],
                             "storageclass.yaml")
    with open(yaml_path, 'r') as fd:
        yaml_data = yaml.safe_load(fd)
    yaml_data.update(request.param['values'])
    storageclass = OCS(**yaml_data)
    sc_data = storageclass.create()

    logger.debug('Check that storageclass has assigned creationTimestamp')
    assert sc_data['metadata']['creationTimestamp']

    yield sc_data

    logger.info(f"TEARDOWN - removing storageclass "
                f"{request.param['values']['storageclass_name']}")
    storageclass.delete()
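A minimal usage sketch for this fixture, assuming pytest indirect parametrization; the template directory and parameter values below are hypothetical:

import pytest

# Hypothetical parametrization: template_dir and the invalid values are
# illustrative; request.param inside the fixture receives this dictionary.
@pytest.mark.parametrize(
    "invalid_storageclass",
    [{
        "template_dir": "templates/csi-rbd",
        "values": {
            "storageclass_name": "invalid-rbd-sc",
            "clusterID": "wrong-cluster-id",
        },
    }],
    indirect=True,
)
def test_sc_with_invalid_params(invalid_storageclass):
    # The fixture yields the created StorageClass data and removes the
    # StorageClass again during teardown.
    assert invalid_storageclass["metadata"]["name"] == "invalid-rbd-sc"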
Example #5
def test_ceph_rgw_metrics_after_metrics_exporter_respin(rgw_deployments):
    """
    RGW metrics should be provided via OCP Prometheus even after
    ocs-metrics-exporter pod is respinned.

    """
    logger.info("Respin ocs-metrics-exporter pod")
    pod_obj = ocp.OCP(kind=constants.POD,
                      namespace=defaults.ROOK_CLUSTER_NAMESPACE)
    metrics_pods = pod_obj.get(
        selector="app.kubernetes.io/name=ocs-metrics-exporter")["items"]
    assert len(metrics_pods) == 1
    metrics_pod_data = metrics_pods[0]
    metrics_pod = OCS(**metrics_pod_data)
    metrics_pod.delete(force=True)

    logger.info("Wait for ocs-metrics-exporter pod to come up")
    assert pod_obj.wait_for_resource(
        condition="Running",
        selector="app.kubernetes.io/name=ocs-metrics-exporter",
        resource_count=1,
        timeout=600,
    )

    logger.info("Collect RGW metrics")
    prometheus = PrometheusAPI()
    list_of_metrics_without_results = metrics.get_missing_metrics(
        prometheus, metrics.ceph_rgw_metrics)
    msg = (
        "OCS Monitoring should provide some value(s) for tested rgw metrics, "
        "so that the list of metrics without results is empty.")
    assert list_of_metrics_without_results == [], msg
Example #6
def setup_ceph_toolbox():
    """
    Setup ceph-toolbox. Also checks if the toolbox already exists; if it
    does, this behaves as a no-op.
    """
    namespace = ocsci_config.ENV_DATA['cluster_namespace']
    ceph_toolbox = get_pod_name_by_pattern('rook-ceph-tools', namespace)
    if len(ceph_toolbox) == 1:
        log.info("Ceph toolbox already exists, skipping")
        return
    if ocsci_config.ENV_DATA.get("ocs_version") == '4.2':
        rook_operator = get_pod_name_by_pattern('rook-ceph-operator',
                                                namespace)
        out = run_cmd(
            f'oc -n {namespace} get pods {rook_operator[0]} -o yaml', )
        version = yaml.safe_load(out)
        rook_version = version['spec']['containers'][0]['image']
        tool_box_data = templating.load_yaml(constants.TOOL_POD_YAML)
        tool_box_data['spec']['template']['spec']['containers'][0][
            'image'] = rook_version
        rook_toolbox = OCS(**tool_box_data)
        rook_toolbox.create()
    else:
        # for OCS >= 4.3 there is new toolbox pod deployment done here:
        # https://github.com/openshift/ocs-operator/pull/207/
        log.info("starting ceph toolbox pod")
        run_cmd(
            'oc patch ocsinitialization ocsinit -n openshift-storage --type '
            'json --patch  \'[{ "op": "replace", "path": '
            '"/spec/enableCephTools", "value": true }]\'')
Example #7
def create_cephfilesystem():
    """
    Function for deploying CephFileSystem (MDS)

    Returns:
        bool: True if the CephFileSystem is created successfully
    """
    fs_data = templating.load_yaml_to_dict(constants.CEPHFILESYSTEM_YAML)
    fs_data['metadata']['name'] = create_unique_resource_name(
        'test', 'cephfs'
    )
    fs_data['metadata']['namespace'] = config.ENV_DATA['cluster_namespace']
    global CEPHFS_OBJ
    CEPHFS_OBJ = OCS(**fs_data)
    CEPHFS_OBJ.create()
    pods = pod.get_all_pods(
        namespace=defaults.ROOK_CLUSTER_NAMESPACE
    )
    for pod_obj in pods:
        if 'rook-ceph-mds' in pod_obj.labels.values():
            assert pod_obj.ocp.wait_for_resource(
                condition=constants.STATUS_RUNNING,
                selector='app=rook-ceph-mds'
            )
    assert validate_cephfilesystem(fs_name=fs_data['metadata']['name'])
    return True
Example #8
    def deploy_and_wait_for_wl_to_start(self, timeout=300, sleep=20):
        """
        Deploy the workload and wait until it starts working

        Args:
            timeout (int): time in seconds to wait until the benchmark starts
            sleep (int): Sleep interval in seconds

        """
        log.debug(f"The {self.benchmark_name} CR file is {self.crd_data}")
        self.benchmark_obj = OCS(**self.crd_data)
        self.benchmark_obj.create()

        # This time is only for reporting - when the benchmark started.
        self.start_time = time.strftime("%Y-%m-%dT%H:%M:%SGMT", time.gmtime())

        # Wait for benchmark client pod to be created
        log.info(f"Waiting for {self.client_pod_name} to Start")
        for bm_pod in TimeoutSampler(
                timeout,
                sleep,
                get_pod_name_by_pattern,
                self.client_pod_name,
                constants.RIPSAW_NAMESPACE,
        ):
            try:
                if bm_pod[0] is not None:
                    self.client_pod = bm_pod[0]
                    break
            except IndexError:
                log.info("Bench pod is not ready yet")
        # Sleeping for 15 sec for the client pod to be fully accessible
        time.sleep(15)
        log.info(f"The benchmark pod {self.client_pod_name} is Running")
Example #9
    def create_kafka_topic(self, name="my-topic", partitions=1, replicas=1):
        """
        Creates kafka topic

        Args:
            name (str): Name of the kafka topic
            partitions (int): Number of partitions
            replicas (int): Number of replicas

        Return: kafka_topic object
        """
        try:
            kafka_topic = templating.load_yaml(
                os.path.join(self.dir, self.kafka_topic_yaml)
            )
            kafka_topic["metadata"]["name"] = name
            kafka_topic["spec"]["partitions"] = partitions
            kafka_topic["spec"]["replicas"] = replicas
            self.kafka_topic = OCS(**kafka_topic)
            self.kafka_topic.create()
        except (CommandFailed, CalledProcessError) as cf:
            if f'kafkatopics.kafka.strimzi.io "{name}" already exists' not in str(cf):
                log.error("Failed during creating of Kafka topic")
                raise cf

        # Making sure kafka topic created
        if self.kafka_topic_obj.get(resource_name=name):
            return self.kafka_topic
        else:
            raise ResourceWrongStatusException("kafka topic is not created")
Example #10
    def create_ocs_jenkins_template(self):
        """

        Create OCS Jenkins Template
        """
        log.info("Create Jenkins Template, jenkins-persistent-ocs")
        ocp_obj = OCP(namespace='openshift', kind='template')
        tmp_dict = ocp_obj.get(
            resource_name='jenkins-persistent', out_yaml_format=True
        )
        tmp_dict['labels']['app'] = 'jenkins-persistent-ocs'
        tmp_dict['labels']['template'] = 'jenkins-persistent-ocs-template'
        tmp_dict['metadata']['name'] = 'jenkins-persistent-ocs'
        tmp_dict['objects'][1]['metadata']['annotations'] = {
            'volume.beta.kubernetes.io/storage-class': 'ocs-storagecluster-ceph-rbd'
        }
        tmp_dict['objects'][2]['spec']['template']['spec']['containers'][0]['env'].append(
            {'name': 'JAVA_OPTS', 'value': '${JAVA_OPTS}'})
        tmp_dict['parameters'][4]['value'] = '10Gi'
        tmp_dict['parameters'].append({
            'description': "Override jenkins options to speed up slave spawning",
            'displayName': 'Override jenkins options to speed up slave spawning',
            'name': 'JAVA_OPTS',
            'value': "-Dhudson.slaves.NodeProvisioner.initialDelay=0 "
                     "-Dhudson.slaves.NodeProvisioner.MARGIN=50 -Dhudson."
                     "slaves.NodeProvisioner.MARGIN0=0.85"
        })
        ocs_jenkins_template_obj = OCS(**tmp_dict)
        ocs_jenkins_template_obj.create()
Example #11
    def create_ocs_jenkins_template(self):
        """

        Create OCS Jenkins Template
        """
        log.info("Create Jenkins Template, jenkins-persistent-ocs")
        ocp_obj = OCP(namespace="openshift", kind="template")
        tmp_dict = ocp_obj.get(resource_name="jenkins-persistent",
                               out_yaml_format=True)
        tmp_dict["labels"]["app"] = "jenkins-persistent-ocs"
        tmp_dict["labels"]["template"] = "jenkins-persistent-ocs-template"
        tmp_dict["metadata"]["name"] = "jenkins-persistent-ocs"
        # Find the 'PersistentVolumeClaim' position in the objects list;
        # it differs between OCP 4.5 and OCP 4.6.
        for i in range(len(tmp_dict["objects"])):
            if tmp_dict["objects"][i]["kind"] == constants.PVC:
                tmp_dict["objects"][i]["metadata"]["annotations"] = {
                    "volume.beta.kubernetes.io/storage-class":
                    "ocs-storagecluster-ceph-rbd"
                }

        tmp_dict["parameters"][4]["value"] = "10Gi"
        tmp_dict["parameters"].append({
            "description":
            "Override jenkins options to speed up slave spawning",
            "displayName":
            "Override jenkins options to speed up slave spawning",
            "name":
            "JAVA_OPTS",
            "value":
            "-Dhudson.slaves.NodeProvisioner.initialDelay=0 "
            "-Dhudson.slaves.NodeProvisioner.MARGIN=50 -Dhudson."
            "slaves.NodeProvisioner.MARGIN0=0.85",
        })
        ocs_jenkins_template_obj = OCS(**tmp_dict)
        ocs_jenkins_template_obj.create()
Example #12
    def create_kafka_user(self, name="my-user"):
        """
        Creates kafka user

        Args:
             name (str): Name of the kafka user

        Return: kafka_user object

        """
        try:
            kafka_user = templating.load_yaml(
                os.path.join(self.dir, self.kafka_user_yaml)
            )
            kafka_user["metadata"]["name"] = name
            self.kafka_user = OCS(**kafka_user)
            self.kafka_user.create()
        except (CommandFailed, CalledProcessError) as cf:
            log.error("Failed during creating of Kafka user")
            raise cf

        # Making sure kafka user created
        if self.kafka_user_obj.get(resource_name=name):
            return self.kafka_user
        else:
            raise ResourceWrongStatusException("kafka user is not created")
Example #13
def create_storageclass(sc_name, expect_fail=False):
    """
    Function to create a storage class and check for
    duplicate storage class name

    Args:
        sc_name (str): name of the storageclass to be created
        expect_fail (bool): To catch the incorrect scenario if
            two SCs are indeed created with same name

    Returns:
        None

    """

    # Create a storage class
    sc_data = templating.load_yaml(constants.CSI_RBD_STORAGECLASS_YAML)
    sc_data["metadata"]["name"] = sc_name
    sc_data["parameters"]["clusterID"] = defaults.ROOK_CLUSTER_NAMESPACE

    global SC_OBJ
    SC_OBJ = OCS(**sc_data)

    # Check for expected failure with duplicate SC name
    try:
        SC_OBJ.create()
        assert not expect_fail, "SC creation with the same name passed. Expected to fail!"
        log.info(f"Storage class: {SC_OBJ.name} created successfully!")
        log.debug(sc_data)

    except CommandFailed as ecf:
        assert "AlreadyExists" in str(ecf)
        log.info(f"Cannot create two StorageClasses with same name !"
                 f" Error message:  \n"
                 f"{ecf}")
Example #14
def get_machineset_objs(machineset_names=None):
    """
    Get machineset objects by machineset names

    Args:
        machineset_names (list): The machineset names to get objects for.
            If None, all cluster machinesets are returned.

    Returns:
        list: Cluster machineset OCS objects

    """
    machinesets_obj = OCP(
        kind=constants.MACHINESETS, namespace=constants.OPENSHIFT_MACHINE_API_NAMESPACE
    )

    machineset_dicts = machinesets_obj.get()["items"]
    if not machineset_names:
        return [OCS(**obj) for obj in machineset_dicts]
    else:
        return [
            OCS(**obj)
            for obj in machineset_dicts
            if (obj.get("metadata").get("name") in machineset_names)
        ]
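A brief usage sketch (the machineset name below is hypothetical):

# Fetch every machineset, then filter by a hypothetical worker machineset name.
all_machinesets = get_machineset_objs()
workers = get_machineset_objs(
    machineset_names=["cluster-abc12-worker-us-east-2a"])
log.info(f"Found {len(all_machinesets)} machinesets, {len(workers)} matching")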
Example #15
def create_pvc_snapshot(pvc_name,
                        snap_yaml,
                        snap_name,
                        sc_name=None,
                        wait=False):
    """
    Create snapshot of a PVC

    Args:
        pvc_name (str): Name of the PVC
        snap_yaml (str): The path of snapshot yaml
        snap_name (str): The name of the snapshot to be created
        sc_name (str): The name of the snapshot class
        wait (bool): True to wait for snapshot to be ready, False otherwise

    Returns:
        OCS object
    """
    snapshot_data = templating.load_yaml(snap_yaml)
    snapshot_data['metadata']['name'] = snap_name
    if sc_name:
        snapshot_data['spec']['volumeSnapshotClassName'] = sc_name
    snapshot_data['spec']['source']['persistentVolumeClaimName'] = pvc_name
    ocs_obj = OCS(**snapshot_data)
    created_snap = ocs_obj.create(do_reload=True)
    assert created_snap, f"Failed to create snapshot {snap_name}"
    if wait:
        ocs_obj.ocp.wait_for_resource(condition='true',
                                      resource_name=ocs_obj.name,
                                      column=constants.STATUS_READYTOUSE,
                                      timeout=60)
    return ocs_obj
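A usage sketch with hypothetical resource names; the snapshot yaml constant is assumed to exist alongside the other templates used in these examples:

# Hypothetical call: snapshot an RBD-backed PVC and wait until READYTOUSE.
snap_obj = create_pvc_snapshot(
    pvc_name="pvc-test-rbd",
    snap_yaml=constants.CSI_RBD_SNAPSHOT_YAML,  # assumed template constant
    snap_name="snap-test-rbd",
    wait=True,
)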
Example #16
    def create_consumer_pod(self, num_of_pods=1, value="10000"):
        """
        Creates producer pods

        Args:
            num_of_pods (int): Number of consumer pods to be created
            value (str): Number of messages to be received

        Returns: consumer pod object

        """
        try:
            consumer_pod = templating.load_yaml(constants.HELLO_WORLD_CONSUMER_YAML)
            consumer_pod["spec"]["replicas"] = num_of_pods
            consumer_pod["spec"]["template"]["spec"]["containers"][0]["env"][4][
                "value"
            ] = value
            self.consumer_pod = OCS(**consumer_pod)
            self.consumer_pod.create()
        except (CommandFailed, CalledProcessError) as cf:
            log.error("Failed during creation of consumer pod")
            raise cf

        # Making sure the consumer pods are running
        if self.is_amq_pod_running(
            pod_pattern="hello-world-consumer", expected_pods=num_of_pods
        ):
            return self.consumer_pod
        else:
            raise ResourceWrongStatusException(
                "consumer pod is not getting to running state"
            )
Example #17
def increase_pods_per_worker_node_count(pods_per_node=500, pods_per_core=10):
    """
    Increase the pods-per-node count. By default OCP supports 250 pods per
    node (the limit rises to 500 with OCP 4.6); this function overrides that
    parameter to allow more pods per worker node.
    More detail: https://docs.openshift.com/container-platform/4.5/nodes/nodes/nodes-nodes-managing-max-pods.html

    Example: The default value for podsPerCore is 10 and the default value
    for maxPods is 250. This means that unless the node has 25 cores or more,
    podsPerCore will be the limiting factor by default.

    WARN: This function unschedules the workers and reboots them, so be
    aware that any non-DC pods are expected to be terminated.

    Args:
        pods_per_node (int): Pods per node limit count
        pods_per_core (int): Pods per core limit count

    Raises:
        UnexpectedBehaviour: If the machineconfigpool is not in Updating
            state within 40 seconds.

    """
    max_pods_template = templating.load_yaml(
        constants.PODS_PER_NODE_COUNT_YAML)
    max_pods_template["spec"]["kubeletConfig"]["podsPerCore"] = pods_per_core
    max_pods_template["spec"]["kubeletConfig"]["maxPods"] = pods_per_node

    # Create new max-pods label
    max_pods_obj = OCS(**max_pods_template)
    assert max_pods_obj.create()

    # Apply the changes in the workers
    label_cmd = "label machineconfigpool worker custom-kubelet=small-pods"
    ocp = OCP()
    assert ocp.exec_oc_cmd(command=label_cmd)

    # First wait for Updating status to become True, default it will be False &
    # machine_count and ready_machine_count will be equal
    get_cmd = "get machineconfigpools -o yaml"
    timeout_counter = 0
    while True:
        output = ocp.exec_oc_cmd(command=get_cmd)
        update_status = (output.get("items")[1].get("status").get("conditions")
                         [4].get("status"))
        if update_status == "True":
            break
        elif timeout_counter >= 8:
            raise UnexpectedBehaviour(
                "After 40sec machineconfigpool not in Updating state")
        else:
            logging.info("Sleeping 5secs for the Updating status to change")
            timeout_counter += 1
            time.sleep(5)

    # Validate whether the change was successful
    output = ocp.exec_oc_cmd(command=get_cmd)
    machine_count = output.get("items")[1].get("status").get("machineCount")
    # During manual execution observed each node took 240+ sec for update
    timeout = machine_count * 300
    utils.wait_for_machineconfigpool_status(node_type=constants.WORKER_MACHINE,
                                            timeout=timeout)
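A minimal invocation sketch; the values are illustrative:

# Raise the limit to 600 pods per worker node (and 12 pods per core); the
# machineconfigpool rollout is waited on inside the function.
increase_pods_per_worker_node_count(pods_per_node=600, pods_per_core=12)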
Example #18
    def deploy_and_wait_for_wl_to_start(self):
        """
        Deploy the workload and wait until it starts working

        Returns:
            obj : the FIO client pod object

        """
        log.info(f"The FIO CR file is {self.fio_cr}")
        self.fio_cr_obj = OCS(**self.fio_cr)
        self.fio_cr_obj.create()

        # Wait for fio client pod to be created
        for fio_pod in TimeoutSampler(900, 20, get_pod_name_by_pattern,
                                      "fio-client",
                                      constants.RIPSAW_NAMESPACE):
            try:
                if fio_pod[0] is not None:
                    fio_client_pod = fio_pod[0]
                    break
            except IndexError:
                log.info("Bench pod not ready yet")

        # Getting the start time of the test
        self.start_time = time.strftime("%Y-%m-%dT%H:%M:%SGMT", time.gmtime())
        return fio_client_pod
Example #19
    def setup_amq_kafka_connect(self):
        """
        The function is to setup amq-kafka-connect, the yaml file is pulling from github
        it will make kind: KafkaConnect and will make sure the status is running

        Returns: kafka_connect object
        """
        try:
            kafka_connect = templating.load_yaml(
                os.path.join(self.dir, self.amq_kafka_connect_yaml)
            )
            self.kafka_connect = OCS(**kafka_connect)
            self.kafka_connect.create()
        except (CommandFailed, CalledProcessError) as cf:
            log.error("Failed during setup of AMQ KafkaConnect")
            raise cf

        if self.is_amq_pod_running(
            pod_pattern="my-connect-cluster-connect", expected_pods=1
        ):
            return self.kafka_connect
        else:
            raise ResourceWrongStatusException(
                "my-connect-cluster-connect pod is not getting to running state"
            )
Example #20
def create_resource(desired_status=constants.STATUS_AVAILABLE,
                    wait=True,
                    **kwargs):
    """
    Create a resource

    Args:
        desired_status (str): The status of the resource to wait for
        wait (bool): True for waiting for the resource to reach the desired
            status, False otherwise
        kwargs (dict): Dictionary of the OCS resource

    Returns:
        OCS: An OCS instance

    Raises:
        AssertionError: In case of any failure
    """
    ocs_obj = OCS(**kwargs)
    resource_name = kwargs.get('metadata').get('name')
    created_resource = ocs_obj.create(do_reload=wait)
    assert created_resource, (f"Failed to create resource {resource_name}")
    if wait:
        assert wait_for_resource_state(resource=resource_name,
                                       state=desired_status)
    return ocs_obj
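A usage sketch with a hypothetical resource dictionary; any template whose metadata carries a name works the same way:

# Hypothetical resource: load a CephBlockPool template, name it, then create
# it and wait until it reaches the Available status.
pool_data = templating.load_yaml(constants.CEPHBLOCKPOOL_YAML)  # assumed template
pool_data['metadata']['name'] = 'test-blockpool'
pool_obj = create_resource(desired_status=constants.STATUS_AVAILABLE, **pool_data)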
Example #21
    def create_pgbench_benchmark(self,
                                 replicas,
                                 clients=None,
                                 threads=None,
                                 transactions=None,
                                 scaling_factor=None,
                                 timeout=None):
        """
        Create pgbench benchmark pods

        Args:
            replicas (int): Number of pgbench pods to be deployed
            clients (int): Number of clients
            threads (int): Number of threads
            transactions (int): Number of transactions
            scaling_factor (int): scaling factor
            timeout (int): Time in seconds to wait

        Returns:
            List: pgbench pod objects list

        """
        pg_obj_list = []
        for i in range(replicas):
            log.info("Create resource file for pgbench workload")
            pg_data = templating.load_yaml(constants.PGSQL_BENCHMARK_YAML)
            pg_data['metadata']['name'] = 'pgbench-benchmark' + f"{i}"
            pg_data['spec']['workload']['args']['databases'][0][
                'host'] = "postgres-" + f"{i}" + ".postgres"

            if clients is not None:
                pg_data['spec']['workload']['args']['clients'][0] = clients
            if threads is not None:
                pg_data['spec']['workload']['args']['threads'] = threads
            if transactions is not None:
                pg_data['spec']['workload']['args'][
                    'transactions'] = transactions
            if scaling_factor is not None:
                pg_data['spec']['workload']['args'][
                    'scaling_factor'] = scaling_factor
            pg_obj = OCS(**pg_data)
            pg_obj_list.append(pg_obj)
            pg_obj.create()
        # Confirm that the expected pgbench pods are spun up
        log.info("Checking for pgbench pod names")
        timeout = timeout if timeout else 300
        for pgbench_pods in TimeoutSampler(timeout, replicas,
                                           get_pod_name_by_pattern,
                                           'pgbench-1-dbs-client',
                                           RIPSAW_NAMESPACE):
            try:
                if len(pgbench_pods) == replicas:
                    log.info(f"Expected number of pgbench pods are "
                             f"found: {replicas}")
                    break
            except IndexError:
                log.info(f'Expected number of pgbench pods are {replicas} '
                         f'but only found {len(pgbench_pods)}')
        return pg_obj_list
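A short usage sketch, assuming `pgsql` is an instance of the surrounding workload class:

# Hypothetical call: deploy three pgbench pods with custom load parameters
# and keep the returned OCS objects for later status checks.
pg_objs = pgsql.create_pgbench_benchmark(
    replicas=3, clients=4, transactions=600, timeout=600)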
Example #22
    def create_couchbase_worker(self, replicas=1, sc_name=None):
        """
        Deploy a Couchbase server and pillowfight workload using operator

        The couchbase workers do not come up unless there is an admission controller
        running.  The admission controller is started from the default project prior
        to bringing up the operator.  Secrets, rolebindings and serviceaccounts
        need to also be generated.

        Once the couchbase operator is running, we need to wait for the three
        worker pods to also be up.  Then a pillowfight task is started.

        After the pillowfight task has finished, the log is collected and
        analyzed.

        Raises:
            Exception: If pillowfight results indicate that a minimum performance
                level is not reached (1 second response time, less than 1000 ops
                per second)

        """
        logging.info("Creating pods..")
        cb_example = templating.load_yaml(constants.COUCHBASE_WORKER_EXAMPLE)
        if storagecluster_independent_check():
            cb_example["spec"]["volumeClaimTemplates"][0]["spec"][
                "storageClassName"
            ] = constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_RBD
        cb_example["spec"]["servers"][0]["size"] = replicas
        if sc_name:
            cb_example["spec"]["volumeClaimTemplates"][0]["spec"][
                "storageClassName"
            ] = sc_name
        self.cb_examples = OCS(**cb_example)
        self.cb_examples.create()

        # Wait for last of three workers to be running.

        logging.info("Waiting for the pods to Running")
        for cb_wrk_pods in TimeoutSampler(
            self.WAIT_FOR_TIME,
            3,
            get_pod_name_by_pattern,
            "cb-example",
            constants.COUCHBASE_OPERATOR,
        ):
            try:
                if len(cb_wrk_pods) == replicas:
                    counter = 0
                    for cb_pod in cb_wrk_pods:
                        if self.is_up_and_running(cb_pod, self.up_check):
                            counter += 1
                            logging.info(f"Couchbase worker {cb_pod} is up")
                    if counter == replicas:
                        break
            except IndexError:
                logging.info(
                    f"Expected number of couchbase pods are {replicas} "
                    f"but only found {len(cb_wrk_pods)}"
                )
Example #23
    def setup_amq_cluster_operator(self, namespace=constants.AMQ_NAMESPACE):
        """
        Function to set up the amq cluster-operator. The files are pulled
        from GitHub, and the function makes sure the cluster-operator pod
        is running.

        Args:
            namespace (str): Namespace for AMQ pods

        """

        # Namespace for amq
        try:
            self.create_namespace(namespace)
        except CommandFailed as ef:
            if f'project.project.openshift.io "{namespace}" already exists' not in str(
                ef
            ):
                raise ef

        # Create strimzi-cluster-operator pod
        run(
            f"for i in `(ls strimzi-kafka-operator/packaging/install/cluster-operator/)`;"
            f"do sed -i 's/myproject/{namespace}/g' "
            f"strimzi-kafka-operator/packaging/install/cluster-operator/$i;done",
            shell=True,
            check=True,
            cwd=self.dir,
        )
        self.strimzi_kafka_operator = os.path.join(self.dir, self.amq_dir)
        crds = os.listdir(self.strimzi_kafka_operator)
        self.crd_objects = []
        for adm_yaml in crds:
            try:
                adm_data = templating.load_yaml(
                    os.path.join(self.strimzi_kafka_operator, adm_yaml))
                adm_obj = OCS(**adm_data)
                adm_obj.create()
                self.crd_objects.append(adm_obj)
            except (CommandFailed, CalledProcessError) as cfe:
                if "Error is Error from server (AlreadyExists):" in str(cfe):
                    log.warning(
                        "Some amq leftovers are present, please clean up the cluster"
                    )
                    pytest.skip(
                        "AMQ leftovers are present needs to cleanup the cluster"
                    )
        time.sleep(30)
        #  Check strimzi-cluster-operator pod created
        if self.is_amq_pod_running(pod_pattern="cluster-operator", expected_pods=1):
            log.info("strimzi-cluster-operator pod is in running state")
        else:
            raise ResourceWrongStatusException(
                "strimzi-cluster-operator pod is not getting to running state"
            )
Example #24
    def couchbase_operatorgroup(self):
        """
        Creates an operator group for Couchbase

        """
        operatorgroup_yaml = templating.load_yaml(
            constants.COUCHBASE_OPERATOR_GROUP_YAML)
        self.operatorgroup_yaml = OCS(**operatorgroup_yaml)
        self.operatorgroup_yaml.create()
Example #25
    def create_cb_secrets(self):
        """ "
        Create secrets for running Couchbase workers

        """
        cb_secrets = templating.load_yaml(constants.COUCHBASE_WORKER_SECRET)
        self.cb_secrets = OCS(**cb_secrets)
        self.cb_secrets.create()
        log.info("Successfully created secrets for Couchbase")
        self.cb_create_cb_secret = True
Example #26
    def create_data_buckets(self):
        """
        Create data buckets

        """
        cb_bucket = templating.load_yaml(constants.COUCHBASE_DATA_BUCKET)
        self.cb_bucket = OCS(**cb_bucket)
        self.cb_bucket.create()
        log.info("Successfully created data buckets")
        self.cb_create_bucket = True
Example #27
def setup_ceph_toolbox():
    """
    Setup ceph-toolbox based on the rook-ceph-operator image
    """
    namespace = ocsci_config.ENV_DATA['cluster_namespace']
    rook_operator = get_pod_name_by_pattern('rook-ceph-operator', namespace)
    out = run_cmd(f'oc -n {namespace} get pods {rook_operator[0]} -o yaml', )
    version = yaml.safe_load(out)
    rook_version = version['spec']['containers'][0]['image']
    tool_box_data = templating.load_yaml(constants.TOOL_POD_YAML)
    tool_box_data['spec']['template']['spec']['containers'][0][
        'image'] = rook_version
    rook_toolbox = OCS(**tool_box_data)
    rook_toolbox.create()
Example #28
def default_storageclasses(request, teardown_factory_session):
    """
    Returns a dictionary with storageclasses. Keys represent the reclaim
    policy of the storageclass. There are two storageclasses for each key:
    the first is RBD based and the second one is CephFS based. Storageclasses
    with the Retain reclaim policy are created from the default storageclasses.
    """
    scs = {
        constants.RECLAIM_POLICY_DELETE: [],
        constants.RECLAIM_POLICY_RETAIN: []
    }

    # TODO(fbalak): Use proper constants after
    # https://github.com/red-hat-storage/ocs-ci/issues/1056
    # is resolved
    for sc_name in ('ocs-storagecluster-ceph-rbd',
                    'ocs-storagecluster-cephfs'):
        sc = OCS(kind=constants.STORAGECLASS, metadata={'name': sc_name})
        sc.reload()
        scs[constants.RECLAIM_POLICY_DELETE].append(sc)
        sc.data['reclaimPolicy'] = constants.RECLAIM_POLICY_RETAIN
        sc.data['metadata']['name'] += '-retain'
        sc._name = sc.data['metadata']['name']
        sc.create()
        teardown_factory_session(sc)
        scs[constants.RECLAIM_POLICY_RETAIN].append(sc)
    return scs
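A usage sketch for this fixture; the test body is hypothetical and only illustrates how the returned dictionary is keyed:

# Hypothetical test: pick the RBD storageclass with the Retain reclaim policy
# (index 0 is RBD based, index 1 is CephFS based, as documented above).
def test_retain_rbd_sc(default_storageclasses):
    rbd_retain_sc = default_storageclasses[constants.RECLAIM_POLICY_RETAIN][0]
    log.info(f"Using storageclass {rbd_retain_sc.name}")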
Example #29
def setup(self):
    """
    Setting up the environment for the test
    """
    # Create a storage class
    log.info("Creating a Storage Class")
    self.sc_data = templating.load_yaml(constants.CSI_RBD_STORAGECLASS_YAML)
    self.sc_data['metadata']['name'] = helpers.create_unique_resource_name(
        'test', 'csi-rbd')
    global SC_OBJ
    SC_OBJ = OCS(**self.sc_data)
    assert SC_OBJ.create()
    log.info(f"Storage class: {SC_OBJ.name} created successfully")
    log.debug(self.sc_data)
Example #30
def setup_ceph_toolbox(force_setup=False):
    """
    Setup ceph-toolbox. Also checks if the toolbox already exists; if it
    does, this behaves as a no-op (unless force_setup is set).

    Args:
        force_setup (bool): force setup toolbox pod

    """
    namespace = ocsci_config.ENV_DATA["cluster_namespace"]
    ceph_toolbox = get_pod_name_by_pattern("rook-ceph-tools", namespace)
    # setup toolbox for external mode
    # Refer bz: 1856982 - invalid admin secret
    if len(ceph_toolbox) == 1:
        log.info("Ceph toolbox already exists, skipping")
        if force_setup:
            log.info("Running force setup for Ceph toolbox!")
        else:
            return
    external_mode = ocsci_config.DEPLOYMENT.get("external_mode")

    if ocsci_config.ENV_DATA.get("ocs_version") == "4.2":
        rook_operator = get_pod_name_by_pattern("rook-ceph-operator",
                                                namespace)
        out = run_cmd(
            f"oc -n {namespace} get pods {rook_operator[0]} -o yaml", )
        version = yaml.safe_load(out)
        rook_version = version["spec"]["containers"][0]["image"]
        tool_box_data = templating.load_yaml(constants.TOOL_POD_YAML)
        tool_box_data["spec"]["template"]["spec"]["containers"][0][
            "image"] = rook_version
        rook_toolbox = OCS(**tool_box_data)
        rook_toolbox.create()
    else:
        if external_mode:
            toolbox = templating.load_yaml(constants.TOOL_POD_YAML)
            toolbox["metadata"]["name"] += "-external"
            keyring_dict = ocsci_config.EXTERNAL_MODE.get("admin_keyring")
            env = [{"name": "ROOK_ADMIN_SECRET", "value": keyring_dict["key"]}]
            toolbox["spec"]["template"]["spec"]["containers"][0]["env"] = env
            # add ceph volumeMounts
            ceph_volume_mount_path = {
                "mountPath": "/etc/ceph",
                "name": "ceph-config"
            }
            ceph_volume = {"name": "ceph-config", "emptyDir": {}}
            toolbox["spec"]["template"]["spec"]["containers"][0][
                "volumeMounts"].append(ceph_volume_mount_path)
            toolbox["spec"]["template"]["spec"]["volumes"].append(ceph_volume)
            rook_toolbox = OCS(**toolbox)
            rook_toolbox.create()
            return
        # for OCS >= 4.3 there is new toolbox pod deployment done here:
        # https://github.com/openshift/ocs-operator/pull/207/
        log.info("starting ceph toolbox pod")
        run_cmd(
            "oc patch ocsinitialization ocsinit -n openshift-storage --type "
            'json --patch  \'[{ "op": "replace", "path": '
            '"/spec/enableCephTools", "value": true }]\'')