Example #1
    def cleanup(self):
        """
        Clean up

        """
        for project in self.projects:
            log.info(f"Delete Jenkins project: {project}")
            ocp_obj = OCP(namespace=project)
            ocp_obj.delete_project(project)

        log.info("Delete Jenkins Template")
        ocp_obj = OCP()
        cmd = "delete template.template.openshift.io/jenkins-persistent-ocs -n openshift"
        ocp_obj.exec_oc_cmd(command=cmd, out_yaml_format=False)
        # Wait for the resources to delete
        # https://github.com/red-hat-storage/ocs-ci/issues/2417
        time.sleep(120)
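
A fixed two-minute sleep can hide real state. As a hedged alternative, here is a minimal sketch that polls for the projects to disappear instead, assuming OCP.wait_for_delete() behaves as in the cleanup methods of the later examples:

    def cleanup(self):
        """
        Clean up the Jenkins projects and wait until they are really gone

        """
        ns_obj = OCP(kind="namespace")
        for project in self.projects:
            log.info(f"Delete Jenkins project: {project}")
            OCP(namespace=project).delete_project(project)
            # Poll for deletion rather than sleeping a fixed amount of time
            ns_obj.wait_for_delete(resource_name=project, timeout=300, sleep=10)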
Example #2
class ElasticSearch(object):
    """
    ElasticSearch Environment
    """
    def __init__(self):
        """
        Initializer function

        """
        log.info("Initializing the Elastic-Search environment object")
        self.namespace = "elastic-system"
        self.eck_file = "ocs_ci/templates/app-pods/eck.1.3.1-all-in-one.yaml"
        self.dumper_file = "ocs_ci/templates/app-pods/esclient.yaml"
        self.pvc = "ocs_ci/templates/app-pods/es-pvc.yaml"
        self.crd = "ocs_ci/templates/app-pods/esq.yaml"

        # Creating some different types of OCP objects
        self.ocp = OCP(kind="pod",
                       resource_name="elastic-operator-0",
                       namespace=self.namespace)
        self.ns_obj = OCP(kind="namespace", namespace=self.namespace)
        self.es = OCP(resource_name="quickstart-es-http",
                      namespace=self.namespace)
        self.elasticsearch = OCP(namespace=self.namespace,
                                 kind="elasticsearch")
        self.password = OCP(
            kind="secret",
            resource_name="quickstart-es-elastic-user",
            namespace=self.namespace,
        )

        # Deploy the ECK all-in-one.yaml file
        self._deploy_eck()
        # Deploy the Elastic-Search server
        self._deploy_es()

        # Verify that ES is Up & Running
        timeout = 600
        while timeout > 0:
            if self.get_health():
                log.info("The ElasticSearch server is ready !")
                break
            else:
                log.warning("The ElasticSearch server is not ready yet")
                log.info("going to sleep for 30 sec. before next check")
                time.sleep(30)
                timeout -= 30
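        # Note: if the 600 s budget above is exhausted without get_health()
        # ever returning True, this loop simply falls through and execution
        # continues. The last variant below (Example #6) tightens this by
        # using TimeoutSampler(timeout=180, sleep=10, func=self.get_health)
        # with wait_for_func_status(True), failing the deployment instead.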

        self._deploy_data_dumper_client()

        # Connect to the server
        self.con = self._es_connect()

    def _deploy_eck(self):
        """
        Deploy the ECK environment for Elasticsearch, and make sure the
        operator pod is running

        """

        log.info("Deploying the ECK environment for the ES cluster")
        self.ocp.apply(self.eck_file)

        for es_pod in TimeoutSampler(300, 10, get_pod_name_by_pattern,
                                     "elastic-operator", self.namespace):
            try:
                if es_pod[0] is not None:
                    self.eckpod = es_pod[0]
                    log.info(f"The ECK pod {self.eckpod} is ready !")
                    break
            except IndexError:
                log.info("ECK operator pod not ready yet")

    def _deploy_data_dumper_client(self):
        """
        Deploy an Elasticsearch client pod with a utility that dumps all the
        data from the server to a .tgz file

        """

        log.info("Deploying the es client for dumping all data")
        self.ocp.apply(self.dumper_file)

        for dmp_pod in TimeoutSampler(300, 10, get_pod_name_by_pattern,
                                      "es-dumper", self.namespace):
            try:
                if dmp_pod[0] is not None:
                    self.dump_pod = dmp_pod[0]
                    log.info(
                        f"The dumper client pod {self.dump_pod} is ready !")
                    break
            except IndexError:
                log.info("Dumper pod not ready yet")

    def get_ip(self):
        """
        Return the IP address of the Elasticsearch cluster.
        This IP is usable only inside the OCP cluster.

        Returns:
            str : the IP address

        """
        return self.es.get()["spec"]["clusterIP"]

    def get_port(self):
        """
        Return the port of the Elasticsearch cluster.

        Returns:
            str : the port

        """
        return self.es.get()["spec"]["ports"][0]["port"]

    def _deploy_es(self):
        log.info("Deploy the PVC for the ElasticSearch cluster")
        self.ocp.apply(self.pvc)

        log.info("Deploy the ElasticSearch cluster")
        self.ocp.apply(self.crd)

        for es_pod in TimeoutSampler(300, 20, get_pod_name_by_pattern,
                                     "quickstart-es-default", self.namespace):
            try:
                if es_pod[0] is not None:
                    self.espod = es_pod[0]
                    log.info(f"The ElasticSearch pod {self.espod} Started")
                    break
            except IndexError:
                log.info("elasticsearch pod not ready yet")

        es_pod = OCP(kind="pod", namespace=self.namespace)
        log.info("Waiting for ElasticSearch to Run")
        assert es_pod.wait_for_resource(
            condition=constants.STATUS_RUNNING,
            resource_name=self.espod,
            sleep=30,
            timeout=600,
        )
        log.info("Elastic Search is ready !!!")

    def get_health(self):
        """
        Return the health status of the Elasticsearch cluster.

        Returns:
            bool : True if the status is green (OK), otherwise False

        """
        return self.elasticsearch.get(
        )["items"][0]["status"]["health"] == "green"

    def get_password(self):
        """
        Return the password used to connect to the Elasticsearch cluster.

        Returns:
            str : the password as plain text

        """
        return base64.b64decode(
            self.password.get()["data"]["elastic"]).decode("utf-8")

    def cleanup(self):
        """
        Clean up the environment by removing all Elasticsearch components
        and the namespace.

        """
        log.info("Teardown the Elasticsearch environment")
        log.info("Deleting all resources")
        log.info("Deleting the dumper client pod")
        self.ocp.delete(yaml_file=self.dumper_file)
        log.info("Deleting the es resource")
        self.ocp.delete(yaml_file=self.crd)
        log.info("Deleting the es project")
        self.ns_obj.delete_project(project_name=self.namespace)
        self.ns_obj.wait_for_delete(resource_name=self.namespace, timeout=180)

    def _es_connect(self):
        """
        Create a connection to the internal ES server

        Returns:
            Elasticsearch: elasticsearch connection object

        Raises:
            ConnectionError: if the connection to the server cannot be established

        """
        try:
            es = Elasticsearch([{
                "host": self.get_ip(),
                "port": self.get_port()
            }])
        except esexp.ConnectionError:
            log.error("Can not connect to ES server in the LocalServer")
            raise
        return es

    def get_indices(self):
        """
        Return a list of all indices in the ES server. All of them were
        created by the test, since ES was installed without any pre-existing
        indices.

        Returns:
            list : list of all indices defined in the ES server

        """
        results = []
        log.info("Getting all indices")
        for ind in self.con.indices.get_alias("*"):
            results.append(ind)
        return results

    def _copy(self, es):
        """
        Copy all data from the internal ES server to the main ES.

        **This function is deprecated** - use the dump function instead, and
        load the data from the files into the main ES server

        Args:
            es (obj): elasticsearch object which is connected to the main ES
        """

        query = {"size": 1000, "query": {"match_all": {}}}
        for ind in self.get_indices():
            log.info(f"Reading {ind} from internal ES server")
            try:
                result = self.con.search(index=ind, body=query)
            except esexp.NotFoundError:
                log.warning(f"{ind} Not found in the Internal ES.")
                continue

            log.debug(f"The results from internal ES for {ind} are :{result}")
            log.info(f"Writing {ind} into main ES server")
            for doc in result["hits"]["hits"]:
                log.debug(f"Going to write : {doc}")
                es.index(index=ind, doc_type="_doc", body=doc["_source"])

    def dumping_all_data(self, target_path):
        """
        Dump All data from the internal ES server to .tgz file.

        Args:
            target_path (str): the path the results file will be copied to

        Returns:
            bool: True if the dump operation succeeded and the results data
                  reached the host, otherwise False
        """

        log.info("dumping data from ES server to .tgz file")
        rsh_cmd = f"rsh {self.dump_pod} /elasticsearch-dump/esdumper.py --ip {self.get_ip()} --port {self.get_port()}"
        result = self.ocp.exec_oc_cmd(rsh_cmd,
                                      out_yaml_format=False,
                                      timeout=1200)
        if "ES dump is done." not in result:
            log.error("There is no data in the Elasticsearch server")
            return False
        else:
            src_file = result.split()[-1]
            log.info(f"Copy {src_file} from the client pod")

            cp_command = f"cp {self.dump_pod}:{src_file} {target_path}/FullResults.tgz"
            result = self.ocp.exec_oc_cmd(cp_command, timeout=120)
            log.info(f"The output from the POD is {result}")
            log.info("Extracting the FullResults.tgz file")
            kwargs = {"cwd": target_path}
            results = run_command(f"tar zxvf {target_path}/FullResults.tgz",
                                  **kwargs)
            log.debug(f"The untar results is {results}")
            if "Error in command" in results:
                log.warning("Can not untar the dumped file")
                return False

        return True
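
A minimal usage sketch for this class, assuming a configured OCP cluster context; the target path is illustrative:

    es = ElasticSearch()                              # deploys ECK, the ES server and the dumper pod
    log.info(es.get_indices())                        # indices created by the test so far
    es.dumping_all_data(target_path="/tmp/results")   # writes and extracts FullResults.tgz
    es.cleanup()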
Example #3
class Cosbench(object):
    """
    Cosbench S3 benchmark tool

    """
    def __init__(self):
        """
        Initializer function

        """
        self.ns_obj = OCP(kind="namespace")
        self.namespace = constants.COSBENCH_PROJECT
        self.configmap_obj = OCP(namespace=self.namespace,
                                 kind=constants.CONFIGMAP)
        self.ocp_obj = OCP(namespace=self.namespace)
        self.cosbench_config = None
        self.cosbench_pod = None
        self.cosbench_dir = mkdtemp(prefix="cosbench-tool-")
        self.xml_file = ""
        self.workload_id = ""
        self.init_container = 1
        self.range_selector = "r"
        self.init_object = 1
        mcg_obj = MCG()
        self.access_key_id = mcg_obj.access_key_id
        self.access_key = mcg_obj.access_key
        self.endpoint = (
            "http://" +
            mcg_obj.s3_internal_endpoint.split("/")[2].split(":")[0])

    def setup_cosbench(self):
        """
        Sets up the Cosbench namespace, configmap and pod

        """
        # Create cosbench project
        self.ns_obj.new_project(project_name=self.namespace)

        # Create configmap
        config_data = templating.load_yaml(file=constants.COSBENCH_CONFIGMAP)
        cosbench_configmap_name = create_unique_resource_name(
            constants.COSBENCH, "configmap")
        config_data["metadata"]["name"] = cosbench_configmap_name
        config_data["metadata"]["namespace"] = self.namespace
        self.cosbench_config = OCS(**config_data)
        logger.info(
            f"Creating Cosbench configmap: {self.cosbench_config.name}")
        self.cosbench_config.create()
        self.configmap_obj.wait_for_resource(
            resource_name=self.cosbench_config.name,
            column="DATA",
            condition="4")

        # Create Cosbench pod
        cosbench_pod_data = templating.load_yaml(file=constants.COSBENCH_POD)
        cosbench_pod_data["spec"]["containers"][0]["envFrom"][0][
            "configMapRef"]["name"] = self.cosbench_config.name
        cosbench_pod_name = create_unique_resource_name(
            constants.COSBENCH, "pod")
        cosbench_pod_data["metadata"]["name"] = cosbench_pod_name
        cosbench_pod_data["metadata"]["namespace"] = self.namespace
        self.cosbench_pod = OCS(**cosbench_pod_data)
        logger.info(f"Creating Cosbench pod: {self.cosbench_pod.name}")
        self.cosbench_pod.create()
        helpers.wait_for_resource_state(resource=self.cosbench_pod,
                                        state=constants.STATUS_RUNNING,
                                        timeout=300)

    def _apply_mcg_auth(self, xml_root):
        """
        Applies MCG credentials

        Args:
            xml_root (Element): Root element of workload xml

        """
        xml_root[0].set(
            "config",
            f"accesskey={self.access_key_id};secretkey={self.access_key};"
            f"endpoint={self.endpoint};path_style_access=true",
        )
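
    # For illustration: after _apply_mcg_auth() runs, the <storage> element of
    # the workload xml carries a config of the form (values elided here):
    #   <storage type="s3" config="accesskey=...;secretkey=...;endpoint=http://...;path_style_access=true" />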

    def run_init_workload(
        self,
        prefix,
        containers,
        objects,
        start_container=None,
        start_object=None,
        size=64,
        size_unit="KB",
        sleep=15,
        timeout=300,
        validate=True,
    ):
        """
        Creates specific containers and objects in bulk

        Args:
            prefix (str): Prefix of bucket name.
            containers (int): Number of containers/buckets to be created.
            objects (int): Number of objects to be created on each bucket.
            start_container (int): Start of containers. Default: 1.
            start_object (int): Start of objects. Default: 1.
            size (int): Size of each object.
            size_unit (str): Object size unit (B/KB/MB/GB)
            sleep (int): Sleep in seconds.
            timeout (int): Timeout in seconds.
            validate (bool): Validates whether init and prepare is completed.

        Returns:
            Tuple[str, str]: Workload ID and name (returned when validate is False)

        """
        init_template = """
        <workload name="Fill" description="Init and prepare operation">
        <storage type="s3" config="" />
          <workflow>
            <workstage name="init-containers">
              <work type="init" workers="1" config="" />
            </workstage>
            <workstage name="prepare-objects">
              <work type="prepare" workers="16" config="" />
            </workstage>
          </workflow>
        </workload>
        """
        xml_root, xml_tree = self._create_element_tree(template=init_template)
        workload_name = xml_root.get("name")
        self._apply_mcg_auth(xml_root)
        self.init_container = (start_container
                               if start_container else self.init_container)
        self.init_object = start_object if start_object else self.init_object
        init_container_config = self.generate_container_stage_config(
            self.range_selector,
            self.init_container,
            containers,
        )
        init_config = self.generate_stage_config(
            self.range_selector,
            self.init_container,
            containers,
            self.init_object,
            objects,
        )
        for stage in xml_root.iter("work"):
            if stage.get("type") == "init":
                stage.set("config",
                          f"cprefix={prefix};{init_container_config}")
            elif stage.get("type") == "prepare":
                stage.set(
                    "config",
                    f"cprefix={prefix};{init_config};sizes=c({str(size)}){size_unit}",
                )
        self._create_tmp_xml(xml_tree=xml_tree, xml_file_prefix=workload_name)
        self.submit_workload(workload_path=self.xml_file)
        self.wait_for_workload(workload_id=self.workload_id,
                               sleep=sleep,
                               timeout=timeout)
        if validate:
            self.validate_workload(workload_id=self.workload_id,
                                   workload_name=workload_name)
        else:
            return self.workload_id, workload_name

    def run_cleanup_workload(
        self,
        prefix,
        containers,
        objects,
        start_container=None,
        start_object=None,
        sleep=15,
        timeout=300,
        validate=True,
    ):
        """
        Deletes specific objects and containers in bulk.

        Args:
            prefix (str): Prefix of bucket name.
            containers (int): Number of containers/buckets to be disposed.
            objects (int): Number of objects to be cleaned up from each bucket.
            start_container (int): Start of containers. Default: 1.
            start_object (int): Start of objects. Default: 1.
            sleep (int): Sleep in seconds.
            timeout (int): Timeout in seconds.
            validate (bool): Validates whether cleanup and dispose is completed.

        Returns:
            Tuple[str, str]: Workload ID and name (returned when validate is False)

        """
        cleanup_template = """
        <workload name="Cleanup" description="Cleanup and Dispose">
          <storage type="s3" config="" />
          <workflow>
            <workstage name="cleanup-objects">
              <work type="cleanup" workers="4" config="" />
            </workstage>
            <workstage name="dispose-containers">
              <work type="dispose" workers="1" config="" />
            </workstage>
          </workflow>
        </workload>
        """
        xml_root, xml_tree = self._create_element_tree(
            template=cleanup_template)
        workload_name = xml_root.get("name")
        self._apply_mcg_auth(xml_root)
        self.init_container = (start_container
                               if start_container else self.init_container)
        self.init_object = start_object if start_object else self.init_object
        cleanup_config = self.generate_stage_config(
            self.range_selector,
            self.init_container,
            containers,
            self.init_object,
            objects,
        )
        for stage in xml_root.iter("work"):
            if stage.get("type") == "cleanup":
                stage.set(
                    "config",
                    f"cprefix={prefix};{cleanuo_config}",
                )
            elif stage.get("type") == "dispose":
                stage.set("config", f"cprefix={prefix};{cleanuo_config}")

        self._create_tmp_xml(xml_tree=xml_tree, xml_file_prefix=workload_name)
        self.submit_workload(workload_path=self.xml_file)
        self.wait_for_workload(workload_id=self.workload_id,
                               sleep=sleep,
                               timeout=timeout)
        if validate:
            self.validate_workload(workload_id=self.workload_id,
                                   workload_name=workload_name)
        else:
            return self.workload_id, workload_name

    def run_main_workload(
        self,
        operation_type,
        prefix,
        containers,
        objects,
        workers=4,
        selector="s",
        start_container=None,
        start_object=None,
        size=64,
        size_unit="KB",
        sleep=15,
        timeout=300,
        extend_objects=None,
        validate=True,
        result=True,
    ):
        """
        Creates and runs main Cosbench workload.

        Args:
            operation_type (dict): Cosbench operation and its ratio.
                                   Operation (str): Supported ops are read, write, list and delete.
                                   Ratio (int): Percentage of each operation. Should add up to 100.
            workers (int): Number of users to perform operations.
            containers (int): Number of containers/buckets to be created.
            objects (int): Number of objects to be created on each bucket.
            selector (str): The way object is accessed/selected. u=uniform, r=range, s=sequential.
            prefix (str): Prefix of bucket name.
            start_container (int): Start of containers. Default: 1.
            start_object (int): Start of objects. Default: 1.
            size (int): Size of each object.
            size_unit (str): Object size unit (B/KB/MB/GB)
            sleep (int): Sleep in seconds
            timeout (int): Timeout in seconds
            validate (bool): Validates whether each stage is completed
            extend_objects (int): Extends the total number of objects to prevent overlap.
                                  Use only for Write and Delete operations.
            result (bool): Get performance results when running workload is completed.

        Returns:
            Tuple: (throughput, bandwidth) when result is True, otherwise
                   the workload ID and name

        """
        main_template = """
        <workload name="workload_name" description="Main workload">
          <storage type="s3" config="" />
          <workflow>
            <workstage name="Main">
              <work name="work_name" workers="4" division="object" runtime="60">
              </work>
            </workstage>
          </workflow>
        </workload>
        """
        xml_root, xml_tree = self._create_element_tree(template=main_template)
        workload_name = xml_root.get("name")
        self._apply_mcg_auth(xml_root)
        start_container = start_container if start_container else self.init_container
        start_object = start_object if start_object else self.init_object
        for stage in xml_root.iter("work"):
            stage.set("workers", f"{workers}")
            for operation, ratio in operation_type.items():
                if operation == "write" or "delete":
                    if extend_objects:
                        start_object = objects + 1
                        stage_config = self.generate_stage_config(
                            selector,
                            start_container,
                            containers,
                            start_object,
                            extend_objects,
                        )
                        attributes = {
                            "type":
                            f"{operation}",
                            "ratio":
                            f"{ratio}",
                            "config":
                            f"cprefix={prefix};{stage_config};sizes=c({str(size)}){size_unit}",
                        }
                        ElementTree.SubElement(stage, "operation", attributes)
                    else:
                        stage_config = self.generate_stage_config(
                            selector,
                            start_container,
                            containers,
                            start_object,
                            objects,
                        )

                        attributes = {
                            "type":
                            f"{operation}",
                            "ratio":
                            f"{ratio}",
                            "config":
                            f"cprefix={prefix};{stage_config};sizes=c({str(size)}){size_unit}",
                        }
                        ElementTree.SubElement(stage, "operation", attributes)
                else:
                    stage_config = self.generate_stage_config(
                        selector,
                        start_container,
                        containers,
                        start_object,
                        objects,
                    )
                    attributes = {
                        "type": f"{operation}",
                        "ratio": f"{ratio}",
                        "config": f"cprefix={prefix};{stage_config}",
                    }
                    ElementTree.SubElement(stage, "operation", attributes)

        self._create_tmp_xml(xml_tree=xml_tree, xml_file_prefix=workload_name)
        self.submit_workload(workload_path=self.xml_file)
        self.wait_for_workload(workload_id=self.workload_id,
                               sleep=sleep,
                               timeout=timeout)
        if validate:
            self.validate_workload(workload_id=self.workload_id,
                                   workload_name=workload_name)
        else:
            return self.workload_id, workload_name

        if result:
            throughput, bandwidth = self.get_performance_result(
                workload_id=self.workload_id,
                workload_name=workload_name,
                size=size,
            )
            return throughput, bandwidth
        else:
            return self.workload_id, workload_name

    @staticmethod
    def generate_stage_config(selector, start_container, end_container,
                              start_objects, end_object):
        """
        Generates config which is used in stage creation

        Args:
            selector (str): The way object is accessed/selected. u=uniform, r=range, s=sequential.
            start_container (int): Start of containers
            end_container (int): End of containers
            start_objects (int): Start of objects
            end_object (int): End of objects

        Returns:
            (str): Container and object configuration

        """
        xml_config = (
            f"containers={selector}({str(start_container)},{str(end_container)});"
            f"objects={selector}({str(start_objects)},{str(end_object)})")
        return xml_config
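
    # Worked example: generate_stage_config("r", 1, 10, 1, 100) returns
    #   "containers=r(1,10);objects=r(1,100)"
    # and generate_container_stage_config("r", 1, 10) below returns
    #   "containers=r(1,10);"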

    @staticmethod
    def generate_container_stage_config(selector, start_container,
                                        end_container):
        """
        Generates container config which creates buckets in bulk

        Args:
            selector (str): The way object is accessed/selected. u=uniform, r=range, s=sequential.
            start_container (int): Start of containers
            end_container (int): End of containers

        Returns:
            (str): Container and object configuration

        """
        container_config = (
            f"containers={selector}({str(start_container)},{str(end_container)});"
        )
        return container_config

    def _create_tmp_xml(self, xml_tree, xml_file_prefix):
        """
        Creates an xml file and writes the workload to it

        Args:
            xml_file_prefix (str): Prefix of xml file
            xml_tree (Element): Element tree

        """
        self.xml_file = NamedTemporaryFile(
            dir=self.cosbench_dir,
            prefix=f"{xml_file_prefix}",
            suffix=".xml",
            delete=False,
        ).name
        logger.info(self.xml_file)
        xml_tree.write(self.xml_file)

    @staticmethod
    def _create_element_tree(template):
        """
        Creates element tree and root element of xml

        Args:
            template (str): Template of Cosbench workload

        Returns:
            Tuple[Element, ElementTree]: Root element and element tree of xml

        """
        xml_root = ElementTree.fromstring(text=template)
        xml_tree = ElementTree.ElementTree(element=xml_root)
        return xml_root, xml_tree

    def _copy_workload(self, workload_path):
        """
        Copies workload xml to Cosbench pod

        Args:
            workload_path (str): Absolute path of xml to copy

        """
        self.ocp_obj.exec_oc_cmd(
            command=f"cp {workload_path} {self.cosbench_pod.name}:/cos",
            out_yaml_format=False,
            timeout=180,
        )

    def submit_workload(self, workload_path):
        """
        Submits Cosbench xml to initiate workload

        Args:
            workload_path (str): Absolute path of xml to submit

        """
        self._copy_workload(workload_path=workload_path)
        workload = os.path.split(workload_path)[1]
        self._cosbench_cli(workload)

    @retry(AttributeError, tries=15, delay=5, backoff=1)
    def _cosbench_cli(self, workload):
        """
        Runs Cosbench cli to initiate workload

        Args:
            workload (str): Workload file

        """
        submit_key = "Accepted with ID"
        cosbench_pod_obj = get_pod_obj(name=self.cosbench_pod.name,
                                       namespace=self.namespace)
        submit = cosbench_pod_obj.exec_cmd_on_pod(
            command=f"/cos/cli.sh submit /cos/{workload}",
            out_yaml_format=True,
            timeout=180,
        )
        if submit_key in submit:
            self.workload_id = submit[submit_key]
        else:
            raise AssertionError(
                f"Failed to submit the workload, ID not found. stdout: {submit}"
            )

    def wait_for_workload(self, workload_id, sleep=1, timeout=60):
        """
        Waits for the cosbench workload to complete

        Args:
            workload_id (str): ID of cosbench workload
            sleep (int): sleep in seconds between checks of the pod log
            timeout (int): timeout in seconds to wait for the workload to complete

        Returns:
            bool: Whether cosbench workload processed successfully

        """
        logger.info(f"Waiting for workload {workload_id} to be processed")
        pattern = f"sucessfully processed workload {workload_id}"
        try:
            for ret in TimeoutSampler(
                    timeout=timeout,
                    sleep=sleep,
                    func=get_pod_logs,
                    pod_name=self.cosbench_pod.name,
                    namespace=self.namespace,
            ):
                if re.search(pattern=pattern, string=ret):
                    break
            logger.info(
                f"Verified: Workload {workload_id} processed successfully")
            return True
        except TimeoutExpiredError:
            logger.error(
                f"Workload {workload_id} did not complete. Dumping cosbench pod log"
            )
            # Log cosbench pod for debugging purpose
            cosbench_log = get_pod_logs(pod_name=self.cosbench_pod.name,
                                        namespace=self.namespace)
            logger.debug(cosbench_log)
            return False

    def validate_workload(self, workload_id, workload_name):
        """
        Validates each stage of cosbench workload

        Args:
            workload_id (str): ID of cosbench workload
            workload_name (str): Name of the workload

        Raises:
            UnexpectedBehaviour: When workload csv is incorrect/malformed.

        """
        workload_csv = self.get_result_csv(workload_id=workload_id,
                                           workload_name=workload_name)
        with open(workload_csv, "r") as file:
            reader = csv.reader(file)
            header = next(reader, None)
            if header is not None:
                # Iterate over each row after the header
                logger.info(
                    f"Verifying whether each stage of workload {workload_id} completed"
                )
                for row in reader:
                    if row[16] == "completed":
                        logger.info(f"Stage {row[0]} completed successfully")
                    else:
                        raise AssertionError(
                            f"Failed: Stage {row[0]} did not complete. Status {row[16]}"
                        )
            else:
                raise UnexpectedBehaviour(
                    f"Workload csv is incorrect/malformed. Dumping csv {reader}"
                )

    def get_result_csv(self, workload_id, workload_name):
        """
        Gets cosbench workload result csv

        Args:
            workload_id (str): ID of cosbench workload
            workload_name (str): Name of the workload

        Returns:
            str: Absolute path of the result csv

        """
        archive_file = f"{workload_id}-{workload_name}"
        cmd = (
            f"cp {self.cosbench_pod.name}:/cos/archive/{archive_file}/{archive_file}.csv "
            f"{self.cosbench_dir}/{archive_file}.csv ")
        self.ocp_obj.exec_oc_cmd(
            command=cmd,
            out_yaml_format=False,
            timeout=300,
        )
        return f"{self.cosbench_dir}/{archive_file}.csv"

    def cleanup(self):
        """
        Cosbench cleanup

        """
        switch_to_project(constants.COSBENCH_PROJECT)
        logger.info("Deleting Cosbench pod, configmap and namespace")
        self.cosbench_pod.delete()
        self.cosbench_config.delete()
        self.ns_obj.delete_project(self.namespace)
        self.ns_obj.wait_for_delete(resource_name=self.namespace, timeout=90)

    def get_performance_result(self, workload_name, workload_id, size):
        """
        Gets the throughput and bandwidth of each workload stage and stores
        them in csv files under the Cosbench log directory

        Args:
            workload_name (str): Name of the workload
            workload_id (str): ID of cosbench workload
            size (int): Object size used by the workload

        Returns:
            Tuple[dict, dict]: Throughput and bandwidth data

        """
        workload_file = self.get_result_csv(workload_id=workload_id,
                                            workload_name=workload_name)
        throughput_data = {}
        bandwidth_data = {}
        with open(workload_file, "r") as file:
            reader = csv.reader(file)
            header = next(reader, None)
            if header is not None:
                for row in reader:
                    throughput_data[row[1]] = row[13]
                    bandwidth_data[row[1]] = row[14]
            else:
                raise UnexpectedBehaviour(
                    f"Workload csv is incorrect/malformed. Dumping csv {reader}"
                )
        # Store throughput data on csv file
        log_path = f"{self.cosbench_dir}"
        with open(f"{log_path}/{workload_name}-{size}-throughput.csv",
                  "a") as fd:
            csv_obj = csv.writer(fd)
            for k, v in throughput_data.items():
                csv_obj.writerow([k, v])
        logger.info(
            f"Throughput data present in {log_path}/{workload_name}-{size}-throughput.csv"
        )

        # Store bandwidth data on csv file
        with open(f"{log_path}/{workload_name}-{size}-bandwidth.csv",
                  "a") as fd:
            csv_obj = csv.writer(fd)
            for k, v in bandwidth_data.items():
                csv_obj.writerow([k, v])
        logger.info(
            f"Bandwidth data present in {log_path}/{workload_name}-{size}-bandwidth.csv"
        )
        return throughput_data, bandwidth_data

    def cosbench_full(self):
        """
        Run full Cosbench workload
        """
        bucket_prefix = "bucket-"
        buckets = 10
        objects = 1000

        # Operations to perform and its ratio(%)
        operations = {"read": 50, "write": 50}

        # Deployment of cosbench
        self.setup_cosbench()

        # Create initial containers and objects
        self.run_init_workload(prefix=bucket_prefix,
                               containers=buckets,
                               objects=objects,
                               validate=True)
        # Start measuring time
        start_time = datetime.now()

        # Run main workload
        self.run_main_workload(
            operation_type=operations,
            prefix=bucket_prefix,
            containers=buckets,
            objects=objects,
            validate=True,
            timeout=10800,
        )

        # Calculate the total run time of Cosbench workload
        end_time = datetime.now()
        diff_time = end_time - start_time
        logger.info(f"Cosbench workload completed after {diff_time}")

        # Dispose containers and objects
        self.run_cleanup_workload(prefix=bucket_prefix,
                                  containers=buckets,
                                  objects=objects,
                                  validate=True)
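
A minimal end-to-end sketch, assuming MCG and constants.COSBENCH_PROJECT are available in the cluster; cosbench_full() above wires the same calls together, and the counts here are illustrative:

    cb = Cosbench()
    cb.setup_cosbench()
    cb.run_init_workload(prefix="bucket-", containers=5, objects=100)
    throughput, bandwidth = cb.run_main_workload(
        operation_type={"read": 50, "write": 50},
        prefix="bucket-",
        containers=5,
        objects=100,
    )
    cb.run_cleanup_workload(prefix="bucket-", containers=5, objects=100)
    cb.cleanup()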
Example #4
class CouchBase(PillowFight):
    """
    CouchBase workload operation
    """
    def __init__(self, **kwargs):
        """
        Initializer function

        """
        super().__init__(**kwargs)
        self.args = kwargs
        self.pod_obj = OCP(kind="pod")
        self.ns_obj = OCP(kind="namespace")
        self.couchbase_pod = OCP(kind="pod")
        self.create_namespace(namespace=constants.COUCHBASE_OPERATOR)
        self.cb_create_cb_secret = False
        self.cb_create_cb_cluster = False
        self.cb_create_bucket = False

    def create_namespace(self, namespace):
        """
        Creates a namespace for Couchbase

        Args:
            namespace (str): Namespace for deploying couchbase pods

        """
        try:
            self.ns_obj.new_project(namespace)
        except CommandFailed as ef:
            if f'project.project.openshift.io "{namespace}" already exists' not in str(
                    ef):
                raise ef
            log.info("Namespace already present")

    def couchbase_operatorgroup(self):
        """
        Creates an operator group for Couchbase

        """
        operatorgroup_yaml = templating.load_yaml(
            constants.COUCHBASE_OPERATOR_GROUP_YAML)
        self.operatorgroup_yaml = OCS(**operatorgroup_yaml)
        self.operatorgroup_yaml.create()

    def couchbase_subscription(self):
        """
        Creates subscription for Couchbase operator

        """
        # Create an operator group for Couchbase
        log.info("Creating operator group for couchbase")
        self.couchbase_operatorgroup()
        subscription_yaml = templating.load_yaml(
            constants.COUCHBASE_OPERATOR_SUBSCRIPTION_YAML)
        self.subscription_yaml = OCS(**subscription_yaml)
        self.subscription_yaml.create()

        # Wait for the CSV to reach succeeded state
        cb_csv = self.get_couchbase_csv()
        cb_csv_obj = CSV(resource_name=cb_csv,
                         namespace=constants.COUCHBASE_OPERATOR)
        cb_csv_obj.wait_for_phase("Succeeded", timeout=720)

    def get_couchbase_csv(self):
        """ "
        Get the Couchbase CSV object

        Returns:
            str: Couchbase CSV name

        Raises:
            CSVNotFound: In case no CSV found.

        """
        cb_package_manifest = PackageManifest(
            resource_name="couchbase-enterprise-certified")
        cb_enter_csv = cb_package_manifest.get_current_csv(
            channel="stable", csv_pattern=constants.COUCHBASE_CSV_PREFIX)
        return cb_enter_csv

    def create_cb_secrets(self):
        """ "
        Create secrets for running Couchbase workers

        """
        cb_secrets = templating.load_yaml(constants.COUCHBASE_WORKER_SECRET)
        self.cb_secrets = OCS(**cb_secrets)
        self.cb_secrets.create()
        log.info("Successfully created secrets for Couchbase")
        self.cb_create_cb_secret = True

    def create_cb_cluster(self, replicas=1, sc_name=None):
        """
        Deploy a Couchbase server using Couchbase operator

        Once the couchbase operator is running, we need to wait for the
        worker pods to be up.  Once the Couchbase worker pods are up, pillowfight
        task is started.

        After the pillowfight task has finished, the log is collected and
        analyzed.

        Raises:
            Exception: If pillowfight results indicate that a minimum performance
                level is not reached (1 second response time, less than 1000 ops
                per second)

        """
        log.info("Creating Couchbase worker pods...")
        cb_example = templating.load_yaml(constants.COUCHBASE_WORKER_EXAMPLE)

        if storagecluster_independent_check():
            cb_example["spec"]["volumeClaimTemplates"][0]["spec"][
                "storageClassName"] = constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_RBD
        cb_example["spec"]["servers"][0]["size"] = replicas
        if sc_name:
            cb_example["spec"]["volumeClaimTemplates"][0]["spec"][
                "storageClassName"] = sc_name
        self.cb_example = OCS(**cb_example)
        self.cb_example.create()
        self.cb_create_cb_cluster = True

        # Wait for the Couchbase workers to be running.

        log.info("Waiting for the Couchbase pods to be Running")
        self.pod_obj.wait_for_resource(
            condition="Running",
            selector="app=couchbase",
            resource_count=replicas,
            timeout=900,
        )
        log.info(
            f"Expected number: {replicas} of couchbase workers reached running state"
        )

    def create_data_buckets(self):
        """
        Create data buckets

        """
        cb_bucket = templating.load_yaml(constants.COUCHBASE_DATA_BUCKET)
        self.cb_bucket = OCS(**cb_bucket)
        self.cb_bucket.create()
        log.info("Successfully created data buckets")
        self.cb_create_bucket = True

    def run_workload(self,
                     replicas,
                     num_items=None,
                     num_threads=None,
                     run_in_bg=False):
        """
        Runs the workload using the Pillowfight tool

        Args:
            replicas (int): Number of pods
            num_items (int): Number of items to be loaded to the cluster
            num_threads (int): Number of threads
            run_in_bg (bool) : Optional run IOs in background

        """
        self.result = None
        log.info("Running IOs using Pillow-fight")
        if run_in_bg:
            executor = ThreadPoolExecutor(1)
            self.result = executor.submit(
                PillowFight.run_pillowfights,
                self,
                replicas=replicas,
                num_items=num_items,
                num_threads=num_threads,
            )
            return self.result
        PillowFight.run_pillowfights(self,
                                     replicas=replicas,
                                     num_items=num_items,
                                     num_threads=num_threads)

    def analyze_run(self, skip_analyze=False):
        """
        Analyzing the workload run logs

        Args:
            skip_analyze (bool): Option to skip logs analysis

        """
        if not skip_analyze:
            log.info("Analyzing  workload run logs..")
            PillowFight.analyze_all(self)

    def respin_couchbase_app_pod(self):
        """
        Respin a randomly chosen couchbase app pod and wait for it to reach
        Running state

        """
        app_pod_list = get_pod_name_by_pattern("cb-example",
                                               constants.COUCHBASE_OPERATOR)
        app_pod = random.choice(app_pod_list)
        log.info(f"respin pod {app_pod}")
        app_pod_obj = get_pod_obj(app_pod,
                                  namespace=constants.COUCHBASE_OPERATOR)
        app_pod_obj.delete(wait=True, force=False)
        wait_for_resource_state(resource=app_pod_obj,
                                state=constants.STATUS_RUNNING,
                                timeout=300)

    def get_couchbase_nodes(self):
        """
        Get nodes that contain a couchbase app pod

        Returns:
            list: List of nodes

        """
        app_pods_list = get_pod_name_by_pattern("cb-example",
                                                constants.COUCHBASE_OPERATOR)
        app_pod_objs = list()
        for pod in app_pods_list:
            app_pod_objs.append(
                get_pod_obj(pod, namespace=constants.COUCHBASE_OPERATOR))
        log.info("Create a list of nodes that contain a couchbase app pod")
        nodes_set = set()
        for pod in app_pod_objs:
            log.info(f"pod {pod.name} located on "
                     f"node {pod.get().get('spec').get('nodeName')}")
            nodes_set.add(pod.get().get("spec").get("nodeName"))
        return list(nodes_set)

    def teardown(self):
        """
        Cleaning up the resources created during Couchbase deployment

        """
        if self.cb_create_cb_secret:
            self.cb_secrets.delete()
        if self.cb_create_cb_cluster:
            self.cb_example.delete()
        if self.cb_create_bucket:
            self.cb_bucket.delete()
        self.subscription_yaml.delete()
        switch_to_project("default")
        self.ns_obj.delete_project(constants.COUCHBASE_OPERATOR)
        self.ns_obj.wait_for_delete(resource_name=constants.COUCHBASE_OPERATOR,
                                    timeout=90)
        PillowFight.cleanup(self)
        switch_to_default_rook_cluster_project()
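
A minimal usage sketch, assuming the PillowFight base class provides run_pillowfights() and analyze_all() as referenced above; replica and item counts are illustrative:

    cb = CouchBase()
    cb.couchbase_subscription()
    cb.create_cb_secrets()
    cb.create_cb_cluster(replicas=3)
    cb.create_data_buckets()
    cb.run_workload(replicas=3, num_items=50000)
    cb.analyze_run()
    cb.teardown()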
Example #5
class ElasticSearch(object):
    """
    ElasticSearch Environment
    """
    def __init__(self):
        """
        Initializer function

        """
        log.info("Initializing the Elastic-Search environment object")
        self.namespace = "elastic-system"
        self.eck_file = "ocs_ci/templates/app-pods/eck.1.3.1-all-in-one.yaml"
        self.pvc = "ocs_ci/templates/app-pods/es-pvc.yaml"
        self.crd = "ocs_ci/templates/app-pods/esq.yaml"
        self.lspid = None

        # Creating some different types of OCP objects
        self.ocp = OCP(kind="pod",
                       resource_name="elastic-operator-0",
                       namespace=self.namespace)
        self.ns_obj = OCP(kind="namespace", namespace=self.namespace)
        self.es = OCP(resource_name="quickstart-es-http",
                      namespace=self.namespace)
        self.elasticsearch = OCP(namespace=self.namespace,
                                 kind="elasticsearch")
        self.password = OCP(
            kind="secret",
            resource_name="quickstart-es-elastic-user",
            namespace=self.namespace,
        )

        # Deploy the ECK all-in-one.yaml file
        self._deploy_eck()
        # Deploy the Elastic-Search server
        self._deploy_es()

        # Verify that ES is Up & Running
        timeout = 600
        while timeout > 0:
            if self.get_health():
                log.info("The ElasticSearch server is ready !")
                break
            else:
                log.warning("The ElasticSearch server is not ready yet")
                log.info("going to sleep for 30 sec. before next check")
                time.sleep(30)
                timeout -= 30

        # Starting LocalServer process - port forwarding
        self.local_server()

        # Connect to the server
        self.con = self._es_connect()

    def _deploy_eck(self):
        """
        Deploy the ECK environment for Elasticsearch, and make sure the
        operator pod is running

        """

        log.info("Deploying the ECK environment for the ES cluster")
        self.ocp.apply(self.eck_file)

        for es_pod in TimeoutSampler(300, 10, get_pod_name_by_pattern,
                                     "elastic-operator", self.namespace):
            try:
                if es_pod[0] is not None:
                    self.eckpod = es_pod[0]
                    log.info(f"The ECK pod {self.eckpod} is ready !")
                    break
            except IndexError:
                log.info("ECK operator pod not ready yet")

    def get_ip(self):
        """
        This function return the IP address of the Elasticsearch cluster.
        this IP is to use inside the OCP cluster

        Return
            str : String that represent the Ip Address.

        """
        return self.es.get()["spec"]["clusterIP"]

    def get_port(self):
        """
        This function return the port of the Elasticsearch cluster.

        Return
            str : String that represent the port.

        """
        return self.es.get()["spec"]["ports"][0]["port"]

    def _deploy_es(self):
        log.info("Deploy the PVC for the ElasticSearch cluster")
        self.ocp.apply(self.pvc)

        log.info("Deploy the ElasticSearch cluster")
        self.ocp.apply(self.crd)

        for es_pod in TimeoutSampler(300, 20, get_pod_name_by_pattern,
                                     "quickstart-es-default", self.namespace):
            try:
                if es_pod[0] is not None:
                    self.espod = es_pod[0]
                    log.info(f"The ElasticSearch pod {self.espod} Started")
                    break
            except IndexError:
                log.info("elasticsearch pod not ready yet")

        es_pod = OCP(kind="pod", namespace=self.namespace)
        log.info("Waiting for ElasticSearch to Run")
        assert es_pod.wait_for_resource(
            condition=constants.STATUS_RUNNING,
            resource_name=self.espod,
            sleep=30,
            timeout=600,
        )
        log.info("Elastic Search is ready !!!")

    def get_health(self):
        """
        Return the health status of the Elasticsearch cluster.

        Returns:
            bool : True if the status is green (OK), otherwise False

        """
        return self.elasticsearch.get(
        )["items"][0]["status"]["health"] == "green"

    def get_password(self):
        """
        Return the password used to connect to the Elasticsearch cluster.

        Returns:
            str : the password as plain text

        """
        return base64.b64decode(
            self.password.get()["data"]["elastic"]).decode("utf-8")

    def cleanup(self):
        """
        Clean up the environment: remove all Elasticsearch components and
        stop the port forwarding process.

        """
        log.info("Teardown the Elasticsearch environment")
        log.info(f"Killing the local server process ({self.lspid})")
        os.kill(self.lspid, signal.SIGKILL)
        log.info("Deleting all resources")
        subprocess.run(f"oc delete -f {self.crd}", shell=True)
        self.ns_obj.delete_project(project_name=self.namespace)
        self.ns_obj.wait_for_delete(resource_name=self.namespace, timeout=180)

    def local_server(self):
        """
        Start a sub-process that does port-forwarding, to allow access from
        outside the OpenShift cluster to the Elasticsearch server.

        """
        cmd = f"oc -n {self.namespace } "
        cmd += f"port-forward service/quickstart-es-http {self.get_port()}"
        log.info(f"Going to run : {cmd}")
        proc = subprocess.Popen(cmd, shell=True)
        log.info(f"Starting LocalServer with PID of {proc.pid}")
        self.lspid = proc.pid

    def _es_connect(self):
        """
        Create a connection to the ES via the localhost port-fwd

        Returns:
            Elasticsearch: elasticsearch connection object

        Raises:
            ConnectionError: if the connection to the server cannot be established

        """
        try:
            es = Elasticsearch([{
                "host": "localhost",
                "port": self.get_port()
            }])
        except esexp.ConnectionError:
            log.error("Can not connect to ES server in the LocalServer")
            raise
        return es

    def get_indices(self):
        """
        Return a list of all indices in the ES server. All of them were
        created by the test, since ES was installed without any pre-existing
        indices.

        Returns:
            list : list of all indices defined in the ES server

        """
        results = []
        log.info("Getting all indices")
        for ind in self.con.indices.get_alias("*"):
            results.append(ind)
        return results

    def _copy(self, es):
        """
        Copy all data from the internal ES server to the main ES

        Args:
            es (obj): elasticsearch object which is connected to the main ES

        """

        query = {"size": 1000, "query": {"match_all": {}}}
        for ind in self.get_indices():
            log.info(f"Reading {ind} from internal ES server")
            try:
                result = self.con.search(index=ind, body=query)
            except esexp.NotFoundError:
                log.warning(f"{ind} Not found in the Internal ES.")
                continue

            log.debug(f"The results from internal ES for {ind} are :{result}")
            log.info(f"Writing {ind} into main ES server")
            for doc in result["hits"]["hits"]:
                log.debug(f"Going to write : {doc}")
                es.index(index=ind, doc_type="_doc", body=doc["_source"])
Example #6
class ElasticSearch(object):
    """
    ElasticSearch Environment
    """
    def __init__(self, **kwargs):
        """
        Initializer function

        """
        log.info("Initializing the Elastic-Search environment object")
        self.args = kwargs
        self.namespace = "elastic-system"
        self.repo = self.args.get("repo", constants.OCS_WORKLOADS)
        self.branch = self.args.get("branch", "master")
        self.dir = tempfile.mkdtemp(prefix="eck_")

        # Clone the ECK repo locally
        self._clone()

        self.eck_path = os.path.join(self.dir, "ocs-workloads/eck")
        self.eck_file = os.path.join(self.eck_path, "crds.yaml")
        self.dumper_file = os.path.join(constants.TEMPLATE_APP_POD_DIR,
                                        "esclient.yaml")
        self.crd = os.path.join(constants.TEMPLATE_APP_POD_DIR, "esq.yaml")

        # Creating some different types of OCP objects
        self.ocp = OCP(kind="pod",
                       resource_name="elastic-operator-0",
                       namespace=self.namespace)
        self.ns_obj = OCP(kind="namespace", namespace=self.namespace)

        # Verify that the namespace does not exist; delete it if it does.
        if self.ns_obj.get(dont_raise=True,
                           resource_name=self.namespace) is not None:
            log.warning(
                f"The {self.namespace} namespace already exists, trying to delete it"
            )
            self.ns_obj.delete_project(project_name=self.namespace)
            self.ns_obj.wait_for_delete(resource_name=self.namespace,
                                        timeout=180,
                                        sleep=5)

        self.es = OCP(resource_name="quickstart-es-http",
                      namespace=self.namespace)
        self.elasticsearch = OCP(namespace=self.namespace,
                                 kind="elasticsearch")
        self.password = OCP(
            kind="secret",
            resource_name="quickstart-es-elastic-user",
            namespace=self.namespace,
        )

        # Deploy the ECK all-in-one.yaml file
        self._deploy_eck()

        # Deploy the Elastic-Search server
        if not self._deploy_es():
            self.cleanup()
            raise ElasticSearchNotDeployed("Elasticsearch deployment Failed")

        # Verify that ES is Up & Running
        sample = TimeoutSampler(timeout=180, sleep=10, func=self.get_health)
        if not sample.wait_for_func_status(True):
            self.cleanup()
            raise ElasticSearchNotDeployed("Elasticsearch deployment Failed")

        # Deploy the elasticsearch dumper pod
        self._deploy_data_dumper_client()

        # Connect to the server
        self.con = self._es_connect()

    def _clone(self):
        """
        Clone the ECK repo into a temp directory

        """
        try:
            log.info(f"Cloning ECK in {self.dir}")
            git_clone_cmd = f"git clone -b {self.branch} {self.repo} --depth 1"
            run(git_clone_cmd, shell=True, cwd=self.dir, check=True)
        except (CommandFailed, CalledProcessError) as cf:
            log.error("Error during cloning of ECK repository")
            raise cf

    def _pod_is_found(self, pattern):
        """
        Check whether a pod matching the given pattern exists.

        Args:
            pattern (str): the pattern of the pod to look for

        Returns:
            bool : True if pod found, otherwise False
        """
        return len(get_pod_name_by_pattern(pattern, self.namespace)) > 0

    def _deploy_eck(self):
        """
        Deploying the ECK environment for the Elasticsearch, and make sure it
        is in Running mode

        """

        log.info("Deploying the ECK environment for the ES cluster")
        log.info("Deploy the ECK CRD's")
        self.ocp.apply(self.eck_file)
        log.info("deploy the ECK operator")
        self.ocp.apply(f"{self.eck_path}/operator.yaml")
        sample = TimeoutSampler(timeout=300,
                                sleep=10,
                                func=self._pod_is_found,
                                pattern="elastic-operator")
        if not sample.wait_for_func_status(True):
            err_msg = "ECK deployment Failed"
            log.error(err_msg)
            self.cleanup()
            raise Exception(err_msg)

        log.info("The ECK pod is ready !")

    def _deploy_data_dumper_client(self):
        """
        Deploying elastic search client pod with utility which dump all the data
        from the server to .tgz file

        """

        log.info("Deploying the es client for dumping all data")
        self.ocp.apply(self.dumper_file)

        sample = TimeoutSampler(timeout=300,
                                sleep=10,
                                func=self._pod_is_found,
                                pattern="es-dumper")
        if not sample.wait_for_func_status(True):
            self.cleanup()
            raise Exception("Dumper pod deployment Failed")
        self.dump_pod = get_pod_name_by_pattern("es-dumper", self.namespace)[0]
        log.info(f"The dumper client pod {self.dump_pod} is ready !")

    def get_ip(self):
        """
        Return the IP address (the service ClusterIP) of the Elasticsearch
        cluster. This address is reachable only from inside the OCP cluster.

        Returns:
            str : string that represents the IP address

        """
        return self.es.get()["spec"]["clusterIP"]

    def get_port(self):
        """
        Return the port of the Elasticsearch cluster.

        Returns:
            int : the port number of the Elasticsearch service

        """
        return self.es.get()["spec"]["ports"][0]["port"]

    def _deploy_es(self):
        """
        Deploy the Elasticsearch server.

        Returns:
            bool : True if the deployment succeeded, otherwise False

        """

        # Creating a PVC for the Elasticsearch server and waiting until it is bound
        log.info("Creating a 10 GiB PVC for the ElasticSearch cluster")
        try:
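            # Note: the PVC name below must match the name the ECK operator
            # derives from its volumeClaimTemplates
            # (elasticsearch-data-<cluster>-es-<nodeSet>-<ordinal>), so that
            # the StatefulSet adopts this pre-created, OCS-backed volume
            # instead of provisioning a new one.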
            self.pvc_obj = create_pvc(
                sc_name=self.args.get("sc") or constants.CEPHBLOCKPOOL_SC,
                namespace=self.namespace,
                pvc_name="elasticsearch-data-quickstart-es-default-0",
                access_mode=constants.ACCESS_MODE_RWO,
                size="10Gi",
            )

            # Make sure the PVC is bound, otherwise report the deployment as failed
            wait_for_resource_state(self.pvc_obj, constants.STATUS_BOUND)
        except ResourceWrongStatusException:
            log.error("The PVC couldn't created")
            return False

        self.pvc_obj.reload()

        log.info("Deploy the ElasticSearch cluster")
        self.ocp.apply(self.crd)

        sample = TimeoutSampler(
            timeout=300,
            sleep=10,
            func=self._pod_is_found,
            pattern="quickstart-es-default",
        )
        if not sample.wait_for_func_status(True):
            log.error("The ElasticSearch pod deployment Failed")
            return False

        self.espod = get_pod_name_by_pattern("quickstart-es-default",
                                             self.namespace)[0]
        log.info(f"The ElasticSearch pod {self.espod} Started")

        es_pod = OCP(kind="pod", namespace=self.namespace)
        log.info("Waiting for ElasticSearch to Run")
        if not es_pod.wait_for_resource(
                condition=constants.STATUS_RUNNING,
                resource_name=self.espod,
                sleep=30,
                timeout=600,
        ):
            log.error("TThe ElasticSearch pod is not running !")
            return False
        else:
            log.info("Elastic Search is ready !!!")
            return True

    def get_health(self):
        """
        Return the health status of the Elasticsearch cluster.

        Returns:
            bool : True if the status is green (OK) otherwise - False

        """
        return (
            self.elasticsearch.get()["items"][0]["status"]["health"] == "green"
        )

    def get_password(self):
        """
        Return the password used to connect to Elasticsearch.

        Returns:
            str : The password as text

        """
        return base64.b64decode(
            self.password.get()["data"]["elastic"]).decode("utf-8")

    def cleanup(self):
        """
        Clean up all the Elasticsearch components from the environment.

        """
        log.info("Teardown the Elasticsearch environment")
        log.info("Deleting all resources")
        try:
            log.info("Deleting the dumper client pod")
            self.ocp.delete(yaml_file=self.dumper_file)
        except CommandFailed:
            # If the es-dumper was not deployed yet, trying to delete it
            # will fail.
            log.warning("es-dumper pod does not exist")

        try:
            log.info("Deleting the es resource")
            self.ocp.delete(yaml_file=self.crd)
        except CommandFailed:
            # If Elasticsearch was not deployed yet, trying to delete it
            # will fail.
            log.warning("elastic-search pod does not exist")

        log.info("Deleting the es project")
        self.ocp.delete(f"{self.eck_path}/operator.yaml")
        self.ocp.delete(yaml_file=self.eck_file)
        self.ns_obj.wait_for_delete(resource_name=self.namespace, timeout=180)

        log.info("The ElasticSearch cleaned up from the cluster")

    def _es_connect(self):
        """
        Create a connection to the local ES

        Returns:
            Elasticsearch : elasticsearch connection object, or None if it cannot connect to the ES server

        """
        try:
            es = Elasticsearch([{
                "host": self.get_ip(),
                "port": self.get_port()
            }])
        except esexp.ConnectionError:
            log.warning("Cannot connect to ES server in the LocalServer")
            es = None
        return es
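
        # Sanity-check sketch: elasticsearch-py exposes ping(), so callers can
        # verify the returned connection before using it, e.g.
        #   if es is not None and es.ping():
        #       log.info("The ES server is reachable")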

    def get_indices(self):
        """
        Get a list of all indices in the ES server. All of them were created by
        the test, since the ES installation comes without any pre-installed indices.

        Returns:
            list : list of all indices defined in the ES server

        """
        results = []
        log.info("Getting all indices")
        for ind in self.con.indices.get_alias("*"):
            results.append(ind)
        return results
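
        # Note: indices.get_alias("*") returns a dict keyed by index name, e.g.
        # {"fio-results": {"aliases": {}}} (hypothetical index), so iterating
        # over it yields the index names collected above.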

    def dumping_all_data(self, target_path):
        """
        Dump all data from the internal ES server to a .tgz file.

        Args:
            target_path (str): the path the results file will be copied into

        Returns:
            bool : True if the dump operation succeeded and the results data
                was copied to the host, otherwise False
        """

        log.info("dumping data from ES server to .tgz file")
        rsh_cmd = f"rsh {self.dump_pod} /elasticsearch-dump/esdumper.py --ip {self.get_ip()} --port {self.get_port()}"
        result = self.ocp.exec_oc_cmd(rsh_cmd,
                                      out_yaml_format=False,
                                      timeout=1200)
        if "ES dump is done." not in result:
            log.error("There is no data in the Elasticsearch server")
            return False
        else:
            src_file = result.split()[-1]
            log.info(f"Copy {src_file} from the client pod")

            cp_command = f"cp {self.dump_pod}:{src_file} {target_path}/FullResults.tgz"
            result = self.ocp.exec_oc_cmd(cp_command, timeout=120)
            log.info(f"The output from the POD is {result}")
            log.info("Extracting the FullResults.tgz file")
            kwargs = {"cwd": target_path}
            results = run_command(f"tar zxvf {target_path}/FullResults.tgz",
                                  **kwargs)
            log.debug(f"The untar results is {results}")
            if "Error in command" in results:
                log.warning("Cannot untar the dumped file")
                return False

        return True
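
        # Usage sketch (hypothetical path): dump the internal ES data before
        # the environment is torn down, e.g.
        #   es.dumping_all_data(target_path="/tmp/results")
        #   # -> /tmp/results/FullResults.tgz plus its extracted contents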