Beispiel #1
0
    def create_couchbase_worker(self, replicas=1):
        """
        Deploy a Couchbase server and pillowfight workload using operator

        The couchbase workers do not come up unless there is an admission controller
        running.  The admission controller is started from the default project prior
        to bringing up the operator.  Secrets, rolebindings and serviceaccounts
        need to also be generated.

        Once the couchbase operator is running, we need to wait for the three
        worker pods to also be up.  Then a pillowfight task is started.

        After the pillowfight task has finished, the log is collected and
        analyzed.

        Args:
            replicas (int): Number of couchbase worker pods to deploy

        Raises:
            Exception: If pillowfight results indicate that a minimum performance
                level is not reached (1 second response time, less than 1000 ops
                per second)

        """
        logging.info("Creating pods..")
        cb_example = templating.load_yaml(constants.COUCHBASE_WORKER_EXAMPLE)
        # On external-mode (independent) clusters the default storage class is
        # unavailable, so point the PVC template at the external RBD class.
        if storagecluster_independent_check():
            cb_example["spec"]["volumeClaimTemplates"][0]["spec"][
                "storageClassName"] = constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_RBD
        cb_example["spec"]["servers"][0]["size"] = replicas
        self.cb_examples = OCS(**cb_example)
        self.cb_examples.create()

        # Wait for last of three workers to be running.

        logging.info("Waiting for the pods to Running")
        # Poll the worker pod list every 3s until all expected replicas
        # report up-and-running, or the sampler times out.
        for cb_wrk_pods in TimeoutSampler(
                self.WAIT_FOR_TIME,
                3,
                get_pod_name_by_pattern,
                "cb-example",
                constants.COUCHBASE_OPERATOR,
        ):
            try:
                if len(cb_wrk_pods) == replicas:
                    counter = 0
                    for cb_pod in cb_wrk_pods:
                        if self.is_up_and_running(cb_pod, self.up_check):
                            counter += 1
                            logging.info(f"Couchbase worker {cb_pod} is up")
                    if counter == replicas:
                        break
            except IndexError:
                logging.info(
                    f"Expected number of couchbase pods are {replicas} "
                    f"but only found {len(cb_wrk_pods)}")
Beispiel #2
0
 def factory(interface=constants.CEPHBLOCKPOOL):
     """
     Create (block pool) or fetch (filesystem) a Ceph pool OCS object.

     Args:
         interface (str): Either constants.CEPHBLOCKPOOL or
             constants.CEPHFILESYSTEM

     Returns:
         OCS: The pool object for the requested interface

     """
     # Initialize so an unknown interface hits the assert below with a
     # clear message instead of raising NameError.
     ceph_pool_obj = None
     if interface == constants.CEPHBLOCKPOOL:
         ceph_pool_obj = helpers.create_ceph_block_pool()
     elif interface == constants.CEPHFILESYSTEM:
         cfs = ocp.OCP(kind=constants.CEPHFILESYSTEM,
                       namespace=defaults.ROOK_CLUSTER_NAMESPACE).get(
                           defaults.CEPHFILESYSTEM_NAME)
         ceph_pool_obj = OCS(**cfs)
     assert ceph_pool_obj, f"Failed to create {interface} pool"
     # Only the block pool was created here, so only it is registered
     # for teardown; the filesystem is pre-existing.
     if interface != constants.CEPHFILESYSTEM:
         instances.append(ceph_pool_obj)
     return ceph_pool_obj
Beispiel #3
0
    def setup_postgresql(self, replicas, sc_name=None):
        """
        Deploy postgres sql server

        Args:
            replicas (int): Number of postgresql pods to be deployed
            sc_name (str): Storage class name for the postgres PVCs.
                When provided it overrides the template/default choice.

        Raises:
            CommandFailed: If PostgreSQL server setup fails

        """
        log.info("Deploying postgres database")
        try:
            pgsql_service = templating.load_yaml(constants.PGSQL_SERVICE_YAML)
            pgsql_cmap = templating.load_yaml(constants.PGSQL_CONFIGMAP_YAML)
            pgsql_sset = templating.load_yaml(constants.PGSQL_STATEFULSET_YAML)
            pgsql_sset["spec"]["replicas"] = replicas
            # External-mode clusters must use the external RBD storage class
            if storagecluster_independent_check():
                pgsql_sset["spec"]["volumeClaimTemplates"][0]["spec"][
                    "storageClassName"] = constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_RBD
            # An explicitly requested storage class takes precedence
            if sc_name:
                pgsql_sset["spec"]["volumeClaimTemplates"][0]["spec"][
                    "storageClassName"] = sc_name
            self.pgsql_service = OCS(**pgsql_service)
            self.pgsql_service.create()
            self.pgsql_cmap = OCS(**pgsql_cmap)
            self.pgsql_cmap.create()
            self.pgsql_sset = OCS(**pgsql_sset)
            self.pgsql_sset.create()
            # Wait for all postgres replicas to reach Running state
            self.pod_obj.wait_for_resource(
                condition="Running",
                selector="app=postgres",
                resource_count=replicas,
                timeout=3600,
            )
        except (CommandFailed, CalledProcessError) as cf:
            log.error("Failed during setup of PostgreSQL server")
            raise cf
        self.pgsql_is_setup = True
        log.info("Successfully deployed postgres database")
Beispiel #4
0
def get_node_objs(node_names=None):
    """
    Get node objects by node names

    Args:
        node_names (list): The node names to get their objects for.
            If None, will return all cluster nodes

    Returns:
        list: Cluster node OCP objects

    """
    node_items = OCP(kind='node').get()['items']
    # Narrow down to the requested names before wrapping
    if node_names:
        node_items = [
            item for item in node_items
            if item.get('metadata').get('name') in node_names
        ]
    return [OCS(**item) for item in node_items]
Beispiel #5
0
    def create_kafkadrop(self, wait=True):
        """
        Create kafkadrop pod, service and routes

        Args:
            wait (bool): If true waits till kafkadrop pod running

        Returns:
            tuple: Contains objects of kafkadrop pod, service and route

        Raises:
            CommandFailed/CalledProcessError: If any kafkadrop resource
                fails to be created

        """
        # Create kafkadrop pod; the yaml is a multi-document file holding
        # the pod, service and route definitions, in that order.
        try:
            kafkadrop = list(
                templating.load_yaml(constants.KAFKADROP_YAML,
                                     multi_document=True))
            self.kafkadrop_pod = OCS(**kafkadrop[0])
            self.kafkadrop_svc = OCS(**kafkadrop[1])
            self.kafkadrop_route = OCS(**kafkadrop[2])
            self.kafkadrop_pod.create()
            self.kafkadrop_svc.create()
            self.kafkadrop_route.create()
        except (CommandFailed, CalledProcessError) as cf:
            log.error("Failed during creation of kafkadrop, the kafka UI")
            raise cf

        # Validate kafkadrop pod running
        if wait:
            ocp_obj = OCP(kind=constants.POD,
                          namespace=constants.AMQ_NAMESPACE)
            ocp_obj.wait_for_resource(
                condition=constants.STATUS_RUNNING,
                selector="app=kafdrop",
                timeout=120,
                sleep=5,
            )

        return self.kafkadrop_pod, self.kafkadrop_svc, self.kafkadrop_route
def setup(self):
    """
    Prepare the test environment by creating an RBD storage class
    """
    log.info("Creating a Storage Class")
    self.sc_data = templating.load_yaml(constants.CSI_RBD_STORAGECLASS_YAML)
    unique_name = helpers.create_unique_resource_name('test', 'csi-rbd')
    self.sc_data['metadata']['name'] = unique_name
    # Kept in a module-level global so teardown code can reach it
    global SC_OBJ
    SC_OBJ = OCS(**self.sc_data)
    assert SC_OBJ.create()
    log.info(f"Storage class: {SC_OBJ.name} created successfully")
    log.debug(self.sc_data)
Beispiel #7
0
def setup_ceph_toolbox():
    """
    Deploy the ceph-toolbox pod, pinning its image to the one used by the
    running rook-ceph-operator pod.
    """
    namespace = ocsci_config.ENV_DATA['cluster_namespace']
    rook_operator = get_pod_name_by_pattern('rook-ceph-operator', namespace)
    # Read the operator pod spec to discover the rook image in use
    pod_yaml = run_cmd(f'oc -n {namespace} get pods {rook_operator[0]} -o yaml', )
    operator_pod = yaml.safe_load(pod_yaml)
    rook_image = operator_pod['spec']['containers'][0]['image']
    tool_box_data = templating.load_yaml(constants.TOOL_POD_YAML)
    tool_box_data['spec']['template']['spec']['containers'][0]['image'] = rook_image
    rook_toolbox = OCS(**tool_box_data)
    rook_toolbox.create()
Beispiel #8
0
def get_job_obj(name, namespace=defaults.ROOK_CLUSTER_NAMESPACE):
    """
    Get the job instance for the given job name

    Args:
        name (str): The name of the job
        namespace (str): The namespace to look in

    Returns:
        OCS: A job OCS instance
    """
    job_dict = OCP(
        kind=constants.JOB, namespace=namespace
    ).get(resource_name=name)
    return OCS(**job_dict)
Beispiel #9
0
def get_machineset_objs(machineset_names=None):
    """
    Get machineset objects by machineset names

    Args:
        machineset_names (list): Names of the machinesets to fetch.
            If None, will return all cluster machinesets

    Returns:
        list: Cluster machineset OCS objects

    """
    ms_accessor = OCP(kind=constants.MACHINESETS,
                      namespace=constants.OPENSHIFT_MACHINE_API_NAMESPACE)
    ms_items = ms_accessor.get()["items"]
    # Narrow down to the requested names before wrapping
    if machineset_names:
        ms_items = [
            item for item in ms_items
            if item.get("metadata").get("name") in machineset_names
        ]
    return [OCS(**item) for item in ms_items]
Beispiel #10
0
def get_machine_objs(machine_names=None):
    """
    Get machine objects by machine names

    Args:
        machine_names (list): Names of the machines to fetch.
            If None, will return all cluster machines

    Returns:
        list: Cluster machine OCS objects
    """
    machine_accessor = OCP(
        kind='Machine', namespace=constants.OPENSHIFT_MACHINE_API_NAMESPACE
    )
    machine_items = machine_accessor.get()['items']
    # Narrow down to the requested names before wrapping
    if machine_names:
        machine_items = [
            item for item in machine_items
            if item.get('metadata').get('name') in machine_names
        ]
    return [OCS(**item) for item in machine_items]
Beispiel #11
0
def get_node_objs(node_names=None):
    """
    Get node objects by node names

    Args:
        node_names (list): The node names to get their objects for.
            If None, will return all cluster nodes

    Returns:
        list: Cluster node OCP objects

    """
    items = OCP(kind="node").get()["items"]
    if node_names:
        items = [
            item for item in items
            if item.get("metadata").get("name") in node_names
        ]
    nodes = [OCS(**item) for item in items]
    # Guard against an empty result (e.g. misspelled node names)
    assert nodes, "Failed to get the nodes OCS objects"
    return nodes
Beispiel #12
0
    def setup_cosbench(self):
        """
        Setups Cosbench namespace, configmap and pod

        The configmap is created first and its name is injected into the
        pod spec via an envFrom reference before the pod is created.

        """
        # Create cosbench project
        self.ns_obj.new_project(project_name=self.namespace)

        # Create configmap
        config_data = templating.load_yaml(file=constants.COSBENCH_CONFIGMAP)
        cosbench_configmap_name = create_unique_resource_name(
            constants.COSBENCH, "configmap")
        config_data["metadata"]["name"] = cosbench_configmap_name
        config_data["metadata"]["namespace"] = self.namespace
        self.cosbench_config = OCS(**config_data)
        logger.info(
            f"Creating Cosbench configmap: {self.cosbench_config.name}")
        self.cosbench_config.create()
        # Wait until the configmap reports its 4 data entries
        self.configmap_obj.wait_for_resource(
            resource_name=self.cosbench_config.name,
            column="DATA",
            condition="4")

        # Create Cosbench pod
        cosbench_pod_data = templating.load_yaml(file=constants.COSBENCH_POD)
        # Point the pod's envFrom at the configmap created above
        cosbench_pod_data["spec"]["containers"][0]["envFrom"][0][
            "configMapRef"]["name"] = self.cosbench_config.name
        cosbench_pod_name = create_unique_resource_name(
            constants.COSBENCH, "pod")
        cosbench_pod_data["metadata"]["name"] = cosbench_pod_name
        cosbench_pod_data["metadata"]["namespace"] = self.namespace
        self.cosbench_pod = OCS(**cosbench_pod_data)
        logger.info(f"Creating Cosbench pod: {self.cosbench_pod.name}")
        self.cosbench_pod.create()
        helpers.wait_for_resource_state(resource=self.cosbench_pod,
                                        state=constants.STATUS_RUNNING,
                                        timeout=300)
Beispiel #13
0
    def test_sql_workload_simple(self, ripsaw):
        """
        This is a basic pgsql workload

        Deploys postgres via the ripsaw operator, runs a pgbench benchmark
        against it, validates the pgbench latency output and cleans up.
        """
        # Deployment postgres
        log.info("Deploying postgres database")
        ripsaw.apply_crd('resources/crds/' 'ripsaw_v1alpha1_ripsaw_crd.yaml')
        ripsaw.setup_postgresql()

        # Create pgbench benchmark
        log.info("Create resource file for pgbench workload")
        pg_data = templating.load_yaml(constants.PGSQL_BENCHMARK_YAML)
        pg_obj = OCS(**pg_data)
        pg_obj.create()

        # Wait for pgbench pod to be created
        # Poll every 3s for up to 300s; an empty pod list raises IndexError,
        # which is treated as "not ready yet".
        for pgbench_pod in TimeoutSampler(300, 3, get_pod_name_by_pattern,
                                          'pgbench-1-dbs-client', 'my-ripsaw'):
            try:
                if pgbench_pod[0] is not None:
                    pgbench_client_pod = pgbench_pod[0]
                    break
            except IndexError:
                log.info("Bench pod not ready yet")

        # Wait for pg_bench pod to initialized and complete
        log.info("Waiting for pgbench_client to complete")
        pod_obj = OCP(kind='pod')
        pod_obj.wait_for_resource(
            condition='Completed',
            resource_name=pgbench_client_pod,
            timeout=800,
            sleep=10,
        )

        # Running pgbench and parsing logs
        output = run_cmd(f'oc logs {pgbench_client_pod}')
        pg_output = utils.parse_pgsql_logs(output)
        log.info("*******PGBench output log*********\n" f"{pg_output}")
        # Every parsed record must carry a latency_avg value, otherwise
        # pgbench did not actually run
        for data in pg_output:
            latency_avg = data['latency_avg']
            if not latency_avg:
                raise UnexpectedBehaviour("PGBench failed to run, "
                                          "no data found on latency_avg")
        log.info("PGBench has completed successfully")

        # Clean up pgbench benchmark
        log.info("Deleting PG bench benchmark")
        pg_obj.delete()
Beispiel #14
0
def cls_pvc(request, storage_class):
    """
    PVC fixture

    Returns:
        PVC: object of PVC class
    """
    def fin():
        pvc.delete()

    # Register teardown before creating the resource
    request.addfinalizer(fin)
    # data is usually loaded from a yaml template
    data = dict(api_version='v1', kind='namespace')
    pvc = OCS(**data)
    return pvc
Beispiel #15
0
    def backed_pv_obj(self):
        """
        Returns the backed PV object of pvc_name in namespace

        Returns:
            OCS: An OCS instance for PV
        """
        # Refresh this PVC first so backed_pv reflects the live state
        self.reload()
        pv_data = {
            "api_version": self.api_version,
            "kind": "PersistentVolume",
            "metadata": {"name": self.backed_pv, "namespace": self.namespace},
        }
        backing_pv = OCS(**pv_data)
        backing_pv.reload()
        return backing_pv
Beispiel #16
0
    def setup_amq_cluster_operator(self, namespace=constants.AMQ_NAMESPACE):
        """
        Function to setup amq-cluster_operator,
        the file is pulling from github
        it will make sure cluster-operator pod is running

        Args:
            namespace (str): Namespace for AMQ pods

        Raises:
            CommandFailed: If namespace creation fails for a reason other
                than the project already existing
            ResourceWrongStatusException: If the cluster-operator pod does
                not reach running state

        """

        # Namespace for amq
        try:
            self.create_namespace(namespace)
        except CommandFailed as ef:
            # A pre-existing project is fine; re-raise anything else
            if f'project.project.openshift.io "{namespace}" already exists' not in str(
                    ef):
                raise ef

        # The upstream install files ship hardcoded to the 'myproject'
        # namespace; rewrite them in place (-i) to target the requested
        # namespace.  The original command both lacked -i (so sed output was
        # discarded) and had the substitution direction inverted.
        run(
            f"for i in `(ls strimzi-kafka-operator/packaging/install/cluster-operator/)`;"
            f"do sed -i 's/myproject/{namespace}/g' "
            f"strimzi-kafka-operator/packaging/install/cluster-operator/$i;done",
            shell=True,
            check=True,
            cwd=self.dir,
        )
        self.strimzi_kafka_operator = os.path.join(self.dir, self.amq_dir)
        self.crd_objects = []
        # Apply every install file from the operator directory
        for adm_yaml in os.listdir(self.strimzi_kafka_operator):
            adm_data = templating.load_yaml(
                os.path.join(self.strimzi_kafka_operator, adm_yaml))
            adm_obj = OCS(**adm_data)
            adm_obj.create()
            self.crd_objects.append(adm_obj)
        # Give the operator deployment time to spin up its pod
        time.sleep(30)
        #  Check strimzi-cluster-operator pod created
        if self.is_amq_pod_running(pod_pattern="cluster-operator",
                                   expected_pods=1):
            log.info("strimzi-cluster-operator pod is in running state")
        else:
            raise ResourceWrongStatusException(
                "strimzi-cluster-operator pod is not getting to running state")
Beispiel #17
0
    def test_sql_workload_simple(self, ripsaw):
        """
        This is a basic pgsql workload

        Deploys postgres via ripsaw, waits for it with `oc wait`, runs a
        pgbench benchmark job, validates the latency output and cleans up.
        """
        # Deployment postgres
        log.info("Deploying postgres database")
        ripsaw.apply_crd('resources/crds/' 'ripsaw_v1alpha1_ripsaw_crd.yaml')
        ripsaw.setup_postgresql()
        # Block until the postgres pod reports ready
        run_cmd('bin/oc wait --for condition=ready pod '
                '-l app=postgres '
                '--timeout=120s')

        # Create pgbench benchmark
        log.info("Create resource file for pgbench workload")
        pg_data = templating.load_yaml_to_dict(constants.PGSQL_BENCHMARK_YAML)
        pg_obj = OCS(**pg_data)
        pg_obj.create()
        # Wait for pgbench pod to be created
        # Fixed delay before looking up the pgbench pod; there is no
        # readiness condition available at creation time.
        log.info("waiting for pgbench benchmark to create, "
                 f"PGbench pod name: {pg_obj.name} ")
        wait_time = 30
        log.info(f"Waiting {wait_time} seconds...")
        time.sleep(wait_time)

        # `oc get -o name` returns 'pod/<name>'; keep only the name part
        pgbench_pod = run_cmd('bin/oc get pods -l '
                              'app=pgbench-client -o name')
        pgbench_pod = pgbench_pod.split('/')[1]
        run_cmd('bin/oc wait --for condition=Initialized '
                f'pods/{pgbench_pod} '
                '--timeout=60s')
        run_cmd('bin/oc wait --for condition=Complete jobs '
                '-l app=pgbench-client '
                '--timeout=300s')

        # Running pgbench and parsing logs
        output = run_cmd(f'bin/oc logs {pgbench_pod}')
        pg_output = utils.parse_pgsql_logs(output)
        log.info("*******PGBench output log*********\n" f"{pg_output}")
        # Every parsed record must carry a latency_avg value, otherwise
        # pgbench did not actually run
        for data in pg_output:
            latency_avg = data['latency_avg']
            if not latency_avg:
                raise UnexpectedBehaviour("PGBench failed to run, "
                                          "no data found on latency_avg")
        log.info("PGBench has completed successfully")

        # Clean up pgbench benchmark
        log.info("Deleting PG bench benchmark:")
        pg_obj.delete()
Beispiel #18
0
    def create_ocs_jenkins_template(self):
        """

        Create OCS Jenkins Template

        Clones the stock 'jenkins-persistent' template, renames it, points
        its PVCs at the appropriate RBD storage class, bumps the volume
        size and tunes JVM options before creating it on the cluster.
        """
        log.info("Create Jenkins Template, jenkins-persistent-ocs")
        ocp_obj = OCP(namespace="openshift", kind="template")
        tmp_dict = ocp_obj.get(resource_name="jenkins-persistent",
                               out_yaml_format=True)
        tmp_dict["labels"]["app"] = "jenkins-persistent-ocs"
        tmp_dict["labels"]["template"] = "jenkins-persistent-ocs-template"
        tmp_dict["metadata"]["name"] = "jenkins-persistent-ocs"
        # Find Kind: 'PersistentVolumeClaim' position in the objects list, differs in OCP 4.5 and OCP 4.6.
        sc_name = (constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_RBD
                   if storagecluster_independent_check() else
                   constants.DEFAULT_STORAGECLASS_RBD)
        for i in range(len(tmp_dict["objects"])):
            if tmp_dict["objects"][i]["kind"] == constants.PVC:
                tmp_dict["objects"][i]["metadata"]["annotations"] = {
                    "volume.beta.kubernetes.io/storage-class": sc_name
                }

        # parameters[4] is assumed to be the volume capacity parameter
        # of the stock template -- TODO confirm against the OCP template
        tmp_dict["parameters"][4]["value"] = "10Gi"
        tmp_dict["parameters"].append({
            "description":
            "Override jenkins options to speed up slave spawning",
            "displayName":
            "Override jenkins options to speed up slave spawning",
            "name":
            "JAVA_OPTS",
            "value":
            "-Dhudson.slaves.NodeProvisioner.initialDelay=0 "
            "-Dhudson.slaves.NodeProvisioner.MARGIN=50 -Dhudson."
            "slaves.NodeProvisioner.MARGIN0=0.85",
        })
        if Version.coerce(self.ocp_version) >= Version.coerce("4.8"):
            # Added "Pipeline Utility Steps" plugin via Jenkins Template
            # OCP team changed the default plugin list on OCP4.9
            tmp_dict["objects"][3]["spec"]["template"]["spec"]["containers"][
                0]["env"].append({
                    "name":
                    "INSTALL_PLUGINS",
                    "value":
                    "scm-api:2.6.5,pipeline-utility-steps:2.12.0,workflow-step-api:622."
                    "vb_8e7c15b_c95a_,workflow-cps:2648.va9433432b33c,workflow-api:2.47",
                })
        ocs_jenkins_template_obj = OCS(**tmp_dict)
        ocs_jenkins_template_obj.create()
    def create_jenkins_build_config(self):
        """
        Create a Jenkins build config in each configured project

        Raises:
            CommandFailed/CalledProcessError: If the build config
                creation fails

        """
        for project in self.projects:
            try:
                log.info(f'create build config on {project}')
                build_config_data = templating.load_yaml(
                    constants.JENKINS_BUILDCONFIG_YAML)
                # Target the build config at the current project
                build_config_data['metadata']['namespace'] = project
                OCS(**build_config_data).create()
            except (CommandFailed, CalledProcessError) as cf:
                log.error('Failed to create Jenkins build config')
                raise cf
    def get_builds_obj(self, namespace):
        """
        Get all jenkins builds

        Args:
            namespace (str): Namespace to search for builds in

        Returns:
            List: jenkins build OCS objects

        """
        build_list = self.get_build_name_by_pattern(
            pattern=constants.JENKINS_BUILD, namespace=namespace)
        # The OCP accessor is identical for every build; create it once
        # instead of once per loop iteration.
        ocp_obj = OCP(api_version='v1', kind='Build', namespace=namespace)
        return [
            OCS(**ocp_obj.get(resource_name=build_name))
            for build_name in build_list
        ]
def setup_fs(self):
    """
    Prepare the test environment by creating a CephFS filesystem and
    verifying both MDS pods come up.
    """
    # Kept in a module-level global so teardown code can reach it
    global CEPH_OBJ
    self.fs_data = templating.load_yaml_to_dict(constants.CEPHFILESYSTEM_YAML)
    fs_name = helpers.create_unique_resource_name('test', 'cephfs')
    self.fs_data['metadata']['name'] = fs_name
    self.fs_data['metadata']['namespace'] = config.ENV_DATA['cluster_namespace']
    CEPH_OBJ = OCS(**self.fs_data)
    CEPH_OBJ.create()
    assert POD.wait_for_resource(condition='Running',
                                 selector='app=rook-ceph-mds')
    mds_pods = POD.get(selector='app=rook-ceph-mds')['items']
    assert len(mds_pods) == 2
Beispiel #22
0
    def create_couchbase_worker(self, replicas=1):
        """
        Deploy a Couchbase server and pillowfight workload using operator

        The couchbase workers do not come up unless there is an admission controller
        running.  The admission controller is started from the default project prior
        to bringing up the operator.  Secrets, rolebindings and serviceaccounts
        need to also be generated.

        Once the couchbase operator is running, we need to wait for the three
        worker pods to also be up.  Then a pillowfight task is started.

        After the pillowfight task has finished, the log is collected and
        analyzed.

        Args:
            replicas (int): Number of couchbase worker pods to deploy

        Raises:
            Exception: If pillowfight results indicate that a minimum performance
                level is not reached (1 second response time, less than 1000 ops
                per second)

        """
        logging.info('Creating pods..')
        cb_example = templating.load_yaml(constants.COUCHBASE_WORKER_EXAMPLE)
        cb_example['spec']['servers'][0]['size'] = replicas
        self.cb_examples = OCS(**cb_example)
        self.cb_examples.create()

        # Wait for last of three workers to be running.

        logging.info('Waiting for the pods to Running')
        # Poll the worker pod list every 3s until all expected replicas
        # report up-and-running, or the sampler times out.
        for cb_wrk_pods in TimeoutSampler(self.WAIT_FOR_TIME, 3,
                                          get_pod_name_by_pattern,
                                          'cb-example',
                                          constants.COUCHBASE_OPERATOR):
            try:
                if len(cb_wrk_pods) == replicas:
                    counter = 0
                    for cb_pod in cb_wrk_pods:
                        if self.is_up_and_running(cb_pod, self.up_check):
                            counter += 1
                            logging.info(f'Couchbase worker {cb_pod} is up')
                    if counter == replicas:
                        break
            except IndexError:
                logging.info(
                    f'Expected number of couchbase pods are {replicas} '
                    f'but only found {len(cb_wrk_pods)}')
Beispiel #23
0
    def setup_amq_kafka_persistent(self, sc_name, size=100, replicas=3):
        """
        Function to setup amq-kafka-persistent, the file is pulling from github
        it will make kind: Kafka and will make sure the status is running

        Args:
            sc_name (str): Name of sc
            size (int): Size of the storage in Gi
            replicas (int): Number of kafka and zookeeper pods to be created

        Returns:
            OCS: The kafka_persistent OCS object, if the cluster pods
                reach running state

        Raises:
            CommandFailed/CalledProcessError: If the Kafka-persistent
                setup fails
            ResourceWrongStatusException: If the cluster pods do not reach
                running state

        """
        # External-mode clusters override the caller-provided storage class
        if storagecluster_independent_check():
            sc_name = constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_RBD
        try:
            kafka_persistent = templating.load_yaml(
                os.path.join(self.dir, self.amq_kafka_pers_yaml)
            )
            kafka_persistent["spec"]["kafka"]["replicas"] = replicas
            kafka_persistent["spec"]["kafka"]["storage"]["volumes"][0][
                "class"
            ] = sc_name
            kafka_persistent["spec"]["kafka"]["storage"]["volumes"][0][
                "size"
            ] = f"{size}Gi"

            kafka_persistent["spec"]["zookeeper"]["replicas"] = replicas
            kafka_persistent["spec"]["zookeeper"]["storage"]["class"] = sc_name
            kafka_persistent["spec"]["zookeeper"]["storage"]["size"] = f"{size}Gi"
            self.kafka_persistent = OCS(**kafka_persistent)
            self.kafka_persistent.create()

        except (CommandFailed, CalledProcessError) as cf:
            log.error("Failed during setup of AMQ Kafka-persistent")
            raise cf
        # Give the operator time to spin up the cluster pods
        time.sleep(40)

        # Expected pods: kafka + zookeeper replicas, plus one extra
        # (presumably the entity operator -- verify against the AMQ docs)
        if self.is_amq_pod_running(
            pod_pattern="my-cluster", expected_pods=(replicas * 2) + 1
        ):
            return self.kafka_persistent
        else:
            raise ResourceWrongStatusException(
                "my-cluster-kafka and my-cluster-zookeeper "
                "Pod is not getting to running state"
            )
Beispiel #24
0
def create_workload_job(job_name,
                        bucket,
                        project,
                        mcg_obj,
                        resource_path,
                        custom_options=None):
    """
    Creates kubernetes job that should utilize MCG bucket.

    Args:
        job_name (str): Name of the job
        bucket (obj): MCG bucket with S3 interface
        project (obj): OCP object representing OCP project which will be
            used for the job
        mcg_obj (obj): instance of MCG class
        resource_path (str): path to directory where should be created
            resources
        custom_options (dict): Dictionary of lists containing tuples with
            additional configuration for fio in format:
            {'section': [('option', 'value'),...],...}
            e.g.
            {'global':[('name','bucketname')],'create':[('time_based','1'),('runtime','48h')]}
            Those values can be added to the config or rewrite already existing
            values

    Returns:
        obj: Job object

    """
    # Build the fio job and its configmap from the bucket/MCG details
    fio_job_dict = get_job_dict(job_name)
    fio_configmap_dict = get_configmap_dict(fio_job_dict, mcg_obj, bucket,
                                            custom_options)
    fio_objs = [fio_configmap_dict, fio_job_dict]

    log.info(f"Creating MCG workload job {job_name}")
    job_file = ObjectConfFile("fio_continuous", fio_objs, project,
                              resource_path)

    # deploy the Job to the cluster and start it
    job_file.create()
    log.info(f"Job {job_name} created")

    # get job object
    # Re-fetch the created job from the cluster and wrap it in OCS
    ocp_job_obj = ocp.OCP(kind=constants.JOB, namespace=project.namespace)
    job = OCS(**ocp_job_obj.get(resource_name=job_name))

    return job
Beispiel #25
0
def storage_class(request):
    """
    Storage class fixture

    Returns:
        StorageClass: object of storage class
    """
    def fin():
        sc.delete()

    # Register teardown before creating the resource
    request.addfinalizer(fin)

    logger.info("Creating storage class")
    # data is usually loaded from a yaml template
    data = dict(api_version='v1', kind='namespace')
    sc = OCS(**data)
    return sc
Beispiel #26
0
def get_osd_deployments(osd_label=constants.OSD_APP_LABEL, namespace=None):
    """
    Fetches info about osd deployments in the cluster

    Args:
        osd_label (str): label associated with osd deployments
            (default: constants.OSD_APP_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: config.ENV_DATA['cluster_namespace'])

    Returns:
        list: OSD deployment OCS instances
    """
    # Fall back to the configured cluster namespace when none is given
    namespace = namespace or config.ENV_DATA['cluster_namespace']
    osds = get_deployments_having_label(osd_label, namespace)
    osd_deployments = [OCS(**osd) for osd in osds]
    return osd_deployments
Beispiel #27
0
    def run_pillowfights(self):
        """
        loop through all the yaml files extracted from the pillowfight repo
        and run them.  Run oc logs on the results and save the logs in self.logs
        directory

        Raises:
            Exception: If a pillowfight pod terminates with reason 'Error'

        """
        ocp_local = OCP(namespace=self.namespace)
        pf_files = listdir(constants.TEMPLATE_PILLOWFIGHT_DIR)
        for pf_yaml in pf_files:
            pf_fullpath = join(constants.TEMPLATE_PILLOWFIGHT_DIR, pf_yaml)
            # Skip anything in the directory that is not a yaml file
            if not pf_fullpath.endswith('.yaml'):
                continue
            if not isfile(pf_fullpath):
                continue
            pfight = templating.load_yaml(pf_fullpath)
            lpillowfight = OCS(**pfight)
            lpillowfight.create()
            pf_completion_info = ''
            pf_pod = ''
            # Poll every 3s until the pillowfight container reports a
            # terminated state (Completed or Error)
            for pillowfight_pod in TimeoutSampler(self.WAIT_FOR_TIME, 3,
                                                  get_pod_name_by_pattern,
                                                  'pillowfight',
                                                  self.COUCHBASE_OPERATOR):
                try:
                    pf_pod = pillowfight_pod[0]
                    pod_info = self.up_check.exec_oc_cmd(
                        f"get pods {pf_pod} -o json")
                    pf_status = pod_info['status']['containerStatuses'][0][
                        'state']
                    if 'terminated' in pf_status:
                        pf_completion_info = pf_status['terminated']['reason']
                        break
                except IndexError:
                    log.info(f"Pillowfight {pf_yaml} not yet completed")
            if pf_completion_info == 'Error':
                raise Exception(f"Pillowfight {pf_yaml} failed to complete")
            if pf_completion_info == 'Completed':
                # Save the pod log as <yaml-basename>.log under self.logs
                pf_prefix = pf_yaml[0:pf_yaml.find(".")]
                pf_endlog = f'{pf_prefix}.log'
                pf_log = join(self.logs, pf_endlog)
                data_from_log = ocp_local.exec_oc_cmd(
                    f"logs -f {pf_pod} --ignore-errors", out_yaml_format=False)
                # Strip NUL bytes that would corrupt the text log file
                data_from_log = data_from_log.replace('\x00', '')
                with open(pf_log, 'w') as fd:
                    fd.write(data_from_log)
Beispiel #28
0
    def reclaim_policy(self):
        """
        Returns the reclaim policy of pvc in namespace

        Returns:
            str: The reclaim policy reported by the backing StorageClass
        """
        sc_data = {
            'api_version': self.api_version,
            'kind': 'StorageClass',
            'metadata': {
                'name': self.backed_sc, 'namespace': self.namespace
            },
        }
        sc_obj = OCS(**sc_data)
        # Refresh from the cluster before reading the policy
        sc_obj.reload()
        return sc_obj.get().get('reclaimPolicy')
Beispiel #29
0
    def backed_pv_obj(self):
        """
        Returns the backed PV object of pvc_name in namespace

        Returns:
            OCS: An OCS instance for PV
        """
        # Refresh this PVC first so backed_pv reflects the live state
        self.reload()
        pv_data = dict(
            api_version=self.api_version,
            kind='PersistentVolume',
            metadata={'name': self.backed_pv, 'namespace': self.namespace},
        )
        backing_pv = OCS(**pv_data)
        backing_pv.reload()
        return backing_pv
Beispiel #30
0
    def create_cb_cluster(self, replicas=1, sc_name=None):
        """
        Deploy a Couchbase server using Couchbase operator

        Once the couchbase operator is running, we need to wait for the
        worker pods to be up.  Once the Couchbase worker pods are up, pillowfight
        task is started.

        After the pillowfight task has finished, the log is collected and
        analyzed.

        Args:
            replicas (int): Number of couchbase worker pods to deploy
            sc_name (str): Storage class name for the worker PVCs.
                When provided it overrides the external-mode default.

        Raises:
            Exception: If pillowfight results indicate that a minimum performance
                level is not reached (1 second response time, less than 1000 ops
                per second)

        """
        log.info("Creating Couchbase worker pods...")
        cb_example = templating.load_yaml(constants.COUCHBASE_WORKER_EXAMPLE)

        # Non-managed external-mode clusters need the external RBD class
        if (storagecluster_independent_check()
                and config.ENV_DATA["platform"].lower()
                not in constants.MANAGED_SERVICE_PLATFORMS):
            cb_example["spec"]["volumeClaimTemplates"][0]["spec"][
                "storageClassName"] = constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_RBD
        cb_example["spec"]["servers"][0]["size"] = replicas
        # An explicitly requested storage class takes precedence
        if sc_name:
            cb_example["spec"]["volumeClaimTemplates"][0]["spec"][
                "storageClassName"] = sc_name
        self.cb_example = OCS(**cb_example)
        self.cb_example.create()
        # Flag used by teardown to know the cluster was created
        self.cb_create_cb_cluster = True

        # Wait for the Couchbase workers to be running.

        log.info("Waiting for the Couchbase pods to be Running")
        self.pod_obj.wait_for_resource(
            condition="Running",
            selector="app=couchbase",
            resource_count=replicas,
            timeout=900,
        )
        log.info(
            f"Expected number: {replicas} of couchbase workers reached running state"
        )