Example #1
    def _deploy_es(self):
        """
        Deploying the Elasticsearch server

        """

        # Create a PVC for the Elasticsearch server and wait until it is bound
        log.info("Creating a 10 GiB PVC for the ElasticSearch cluster")
        self.pvc_obj = create_pvc(
            sc_name=constants.CEPHBLOCKPOOL_SC,
            namespace=self.namespace,
            pvc_name="elasticsearch-data-quickstart-es-default-0",
            access_mode=constants.ACCESS_MODE_RWO,
            size="10Gi",
        )
        wait_for_resource_state(self.pvc_obj, constants.STATUS_BOUND)
        self.pvc_obj.reload()

        log.info("Deploy the ElasticSearch cluster")
        self.ocp.apply(self.crd)

        sample = TimeoutSampler(
            timeout=300,
            sleep=10,
            func=self._pod_is_found,
            pattern="quickstart-es-default",
        )
        if not sample.wait_for_func_status(True):
            raise Exception("The ElasticSearch pod deployment Failed")
        self.espod = get_pod_name_by_pattern("quickstart-es-default",
                                             self.namespace)[0]
        log.info(f"The ElasticSearch pod {self.espod} Started")

        es_pod = OCP(kind="pod", namespace=self.namespace)
        log.info("Waiting for ElasticSearch to Run")
        assert es_pod.wait_for_resource(
            condition=constants.STATUS_RUNNING,
            resource_name=self.espod,
            sleep=30,
            timeout=600,
        )
        log.info("Elastic Search is ready !!!")
Example #2
    def wait_for_phase(self, phase, timeout=300, sleep=5):
        """
        Wait until the phase of the CSV resource matches the desired phase
        passed in the phase parameter.

        Args:
            phase (str): Desired phase of CSV object
            timeout (int): Timeout in seconds to wait for desired phase
            sleep (int): Time in seconds to sleep between attempts

        Raises:
            ResourceInUnexpectedState: In case the CSV is not in expected
                phase.

        """
        self.check_name_is_specified()
        sampler = TimeoutSampler(timeout, sleep, self.check_phase, phase=phase)
        if not sampler.wait_for_func_status(True):
            raise ResourceInUnexpectedState(
                f"CSV: {self.resource_name} is not in expected phase: {phase}")
Example #3
    def start_vms(self, vms, wait=True):
        """
        Start VMs

        Args:
            vms (list): List of VM objects
            wait (bool): Wait for VMs to start

        """
        logger.info(f"Powering on VMs: {[vm.name for vm in vms]}")
        tasks = [vm.PowerOn() for vm in vms]
        WaitForTasks(tasks, self._si)

        if wait:
            for ips in TimeoutSampler(240, 3, self.get_vms_ips, vms):
                logger.info(
                    f"Waiting for VMs {[vm.name for vm in vms]} to power on "
                    f"based on network connectivity. Current VMs IPs: {ips}")
                if not (None in ips or "<unset>" in ips):
                    break
Example #4
def test_ts_func_exception(caplog):
    """
    Check that TimeoutSampler handles exception raised during iteration.
    """
    timeout = 2
    sleep_time = 1

    def func():
        raise Exception("oh no")

    results = []
    caplog.set_level(logging.ERROR)
    with pytest.raises(TimeoutExpiredError):
        for result in TimeoutSampler(timeout, sleep_time, func):
            results.append(result)
    assert results == []
    # check that exception was logged properly in each iteration
    for rec in caplog.records:
        assert rec.getMessage() == "Exception raised during iteration: oh no"
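    # with sleep_time of 1 s, the sampler makes (and logs) one attempt per
    # second, so the record count equals the 2 s timeout here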
    assert len(caplog.records) == timeout
Example #5
    def internal_delete_uls(self, name):
        """
        Deletes the Underlying Storage using the S3 API

        Args:
           name (str): The Underlying Storage name to be deleted

        """
        # Todo: rename client to resource (or find an alternative)
        sample = TimeoutSampler(timeout=30,
                                sleep=3,
                                func=self.exec_uls_deletion,
                                name=name)
        if not sample.wait_for_func_status(result=True):
            logger.error(
                f'Deletion of Underlying Storage {name} timed out. Unable to delete {name}'
            )
            raise TimeoutExpiredError
        else:
            logger.info(f'Underlying Storage {name} deleted successfully')
Example #6
        def _wait_for_pv_backingstore_resource_deleted(namespace=None):
            """
            wait for pv backing store resources to be deleted at the end of test teardown

            Args:
                namespace (str): backing store's namespace

            """
            namespace = namespace or config.ENV_DATA["cluster_namespace"]
            sample = TimeoutSampler(
                timeout=120,
                sleep=15,
                func=_check_resources_deleted,
                namespace=namespace,
            )
            if not sample.wait_for_func_status(result=True):
                err_msg = f"{self.name} was not deleted properly, leftovers were found"
                log.error(err_msg)
                raise TimeoutExpiredError(err_msg)
Example #7
    def _deploy_eck(self):
        """
        Deploying the ECK environment for the Elasticsearch cluster, and
        making sure it is in Running state

        """

        log.info('Deploying the ECK environment for the ES cluster')
        self.ocp.apply(f'{self.dir}/{self.eck_file}')

        for es_pod in TimeoutSampler(
            300, 10, get_pod_name_by_pattern, 'elastic-operator', self.namespace
        ):
            try:
                if es_pod[0] is not None:
                    self.eckpod = es_pod[0]
                    log.info(f'The ECK pod {self.eckpod} is ready !')
                    break
            except IndexError:
                log.info('ECK operator pod not ready yet')
Example #8
    def finalizer():
        """
        Finalizer to stop memory leak data capture thread and clean up the files
        """
        set_flag_status('terminated')
        try:
            for status in TimeoutSampler(90, 3, get_flag_status):
                if status == 'terminated':
                    break
        except TimeoutExpiredError:
            log.warning(
                "Background test execution still in progress before "
                "memory leak thread terminated"
            )
        if thread:
            thread.join()
        for worker in helpers.get_worker_nodes():
            if os.path.exists(f"/tmp/{worker}-top-output.txt"):
                os.remove(f"/tmp/{worker}-top-output.txt")
        log.info("Memory leak capture has stopped")
Example #9
    def verify_page_contain_strings(self, strings_on_page, page_name):
        """
        Verify that the page contains the given strings

        Args:
            strings_on_page (list): list of strings on page
            page_name (str): the name of the page

        """
        logger.info(f"verify {strings_on_page} exist on {page_name}")
        for string in strings_on_page:
            sample = TimeoutSampler(
                timeout=3,
                sleep=1,
                func=self.check_element_text,
                expected_text=string,
            )
            if not sample.wait_for_func_status(result=True):
                self.err_list.append(
                    f"{string} string not found on {page_name}")
Example #10
def wait_for_pv_backingstore(backingstore_name, namespace=None):
    """
    wait for existing pv backing store to reach OPTIMAL state

    Args:
        backingstore_name (str): backingstore name
        namespace (str): backing store's namespace

    """

    namespace = namespace or config.ENV_DATA['cluster_namespace']
    sample = TimeoutSampler(
        timeout=240, sleep=15, func=check_pv_backingstore_status,
        backingstore_name=backingstore_name, namespace=namespace
    )
    if not sample.wait_for_func_status(result=True):
        logger.error(f'Backing Store {backingstore_name} never reached OPTIMAL state')
        raise TimeoutExpiredError
    else:
        logger.info(f'Backing Store {backingstore_name} reached OPTIMAL state')
Example #11
def test_ts_wait_for_value_negative(caplog):
    """
    Check that when wait_for_value() fails to see expected return value of
    given func within given timeout, exception is raised and the problem
    logged.
    """
    timeout = 3
    sleep_time = 2
    func = lambda: 1  # noqa: E731
    ts = TimeoutSampler(timeout, sleep_time, func)
    caplog.set_level(logging.ERROR)
    with pytest.raises(TimeoutExpiredError):
        ts.wait_for_func_value(2)
    # check that the problem was logged properly
    assert len(caplog.records) == 1
    for rec in caplog.records:
        log_msg = rec.getMessage()
        assert "function <lambda> failed" in log_msg
        assert "failed to return expected value 2" in log_msg
        assert "during 3 second timeout" in log_msg
Example #12
    def wait_for_volume_attach(self, volume):
        """
        Check whether the volume is attached to a node

        Args:
            volume (str): The volume to wait for to be attached

        Returns:
            bool: True if the volume has been attached to the
                instance, False otherwise

        """
        try:
            for sample in TimeoutSampler(300, 3, self.is_volume_attached,
                                         volume):
                if sample:
                    return True
        except TimeoutExpiredError:
            logger.info("Volume is not attached to node")
            return False
Example #13
    def verify_uls_state(self, uls_name, is_available):
        check_type = "Delete"
        if is_available:
            check_type = "Create"
        sample = TimeoutSampler(timeout=180,
                                sleep=15,
                                func=self.verify_uls_exists,
                                uls_name=uls_name)
        if sample.wait_for_func_status(result=is_available):
            logger.info(
                f"Underlying Storage {uls_name} {check_type.lower()}d successfully."
            )
        else:
            if is_available:
                raise ResourceWrongStatusException(
                    f"{check_type[:-1]}ion of Underlying Storage {uls_name} timed out. "
                    f"Unable to {check_type.lower()} {uls_name}")
            logger.warning(
                f"{uls_name} still found after 3 minutes, and might require manual removal."
            )
Example #14
    def _deploy_data_dumper_client(self):
        """
        Deploying the Elasticsearch client pod with a utility that dumps all
        the data from the server to a .tgz file

        """

        log.info("Deploying the es client for dumping all data")
        self.ocp.apply(self.dumper_file)

        for dmp_pod in TimeoutSampler(300, 10, get_pod_name_by_pattern,
                                      "es-dumper", self.namespace):
            try:
                if dmp_pod[0] is not None:
                    self.dump_pod = dmp_pod[0]
                    log.info(
                        f"The dumper client pod {self.dump_pod} is ready !")
                    break
            except IndexError:
                log.info("Dumper pod not ready yet")
Example #15
    def test_cephfilesystem_creation(self, fs_setup):
        """
        Creating a Ceph Filesystem
        """
        original_active_count, fs_data, ceph_obj = fs_setup
        new_active_count = original_active_count + 1

        fs_data['spec']['metadataServer']['activeCount'] = new_active_count
        ceph_obj.apply(**fs_data)
        for mdss, pods in TimeoutSampler(
                60,
                5,
                get_mds_active_count,
        ):
            if mdss == new_active_count:
                if len(pods) == (new_active_count * 2):
                    log.info(f"mds and pod count reached: {mdss}, {len(pods)}")
                    return
            log.info(f"Current mds count {mdss}, pod count: {len(pods)}")
        pytest.fail("Failed to increase Active MDS count")
Example #16
    def time_taken_to_complete_rebalance(self, timeout=600):
        """
        This function calculates the time taken to complete
        rebalance

        Args:
            timeout (int): Time to wait for the completion of rebalance

        Returns:
            float: Time taken in minutes for the completion of rebalance

        """
        start_time = time.time()
        for rebalance in TimeoutSampler(timeout=timeout,
                                        sleep=10,
                                        func=self.get_rebalance_status):
            if rebalance:
                logging.info("Rebalance is completed")
                time_taken = time.time() - start_time
                return time_taken / 60
Example #17
def verify_new_pv_available_in_sc(old_pv_objs, sc_name, timeout=120):
    """
    Verify that the new pv, that has been created in a specific storage class, is available.

    Args:
        old_pv_objs (list): List of dictionaries of the pv objects
        sc_name (str): The name of the storage class
        timeout (int): time to wait for the new pv to come up

    Returns:
        bool: True if the new pv is available. False, otherwise.

    """
    try:
        for new_pv_objs in TimeoutSampler(
            timeout=timeout,
            sleep=10,
            func=get_pv_objs_in_sc,
            sc_name=sc_name,
        ):
            num_of_new_pv = len(new_pv_objs)
            expected_num_of_new_pv = len(old_pv_objs) + 1
            if num_of_new_pv == expected_num_of_new_pv:
                logger.info(f"Found {expected_num_of_new_pv} PVs as expected")
                break
    except TimeoutExpiredError:
        logger.warning(
            f"Expected to find {expected_num_of_new_pv} PVs in sc {sc_name}, "
            f"but found {num_of_new_pv} PVs"
        )
        return False

    old_pv_names = [get_pv_name(pv) for pv in old_pv_objs]
    new_pv_obj = [pv for pv in new_pv_objs if get_pv_name(pv) not in old_pv_names][0]
    new_pv_status = get_pv_status(new_pv_obj)
    if new_pv_status not in [constants.STATUS_AVAILABLE, constants.STATUS_BOUND]:
        logger.warning(f"New pv is in status {new_pv_status}")
        return False

    logger.info(f"new pv is ready with status {new_pv_status}")

    return True
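A hypothetical end-to-end use of the helper above; trigger_new_pv() stands in for whatever operation is expected to provision the extra PV:

    # Hypothetical usage: snapshot the PVs in the storage class before the
    # operation that should provision a new one, then verify it shows up.
    old_pv_objs = get_pv_objs_in_sc(sc_name="my-sc")
    trigger_new_pv()  # hypothetical step that provisions one more PV
    assert verify_new_pv_available_in_sc(old_pv_objs, sc_name="my-sc", timeout=180)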
Example #18
    def __init__(self):
        """
        Initializer function

        """
        log.info("Initializing the Elastic-Search environment object")
        self.namespace = "elastic-system"
        self.eck_file = "ocs_ci/templates/app-pods/eck.1.6.0-all-in-one.yaml"
        self.dumper_file = "ocs_ci/templates/app-pods/esclient.yaml"
        self.crd = "ocs_ci/templates/app-pods/esq.yaml"

        # Creating some different types of OCP objects
        self.ocp = OCP(kind="pod",
                       resource_name="elastic-operator-0",
                       namespace=self.namespace)
        self.ns_obj = OCP(kind="namespace", namespace=self.namespace)
        self.es = OCP(resource_name="quickstart-es-http",
                      namespace=self.namespace)
        self.elasticsearch = OCP(namespace=self.namespace,
                                 kind="elasticsearch")
        self.password = OCP(
            kind="secret",
            resource_name="quickstart-es-elastic-user",
            namespace=self.namespace,
        )

        # Deploy the ECK all-in-one.yaml file
        self._deploy_eck()
        # Deploy the Elastic-Search server
        self._deploy_es()

        # Verify that ES is Up & Running
        sample = TimeoutSampler(timeout=180, sleep=10, func=self.get_health)
        if not sample.wait_for_func_status(True):
            raise Exception("Elasticsearch deployment Failed")

        # Deploy the elasticsearch dumper pod
        self._deploy_data_dumper_client()

        # Connect to the server
        self.con = self._es_connect()
Example #19
def wait_for_resource_count_change(func_to_use,
                                   previous_num,
                                   namespace,
                                   change_type='increase',
                                   min_difference=1,
                                   timeout=20,
                                   interval=2):
    """
    Wait for a change in total count of PVC or pod

    Args:
        func_to_use (function): Function to be used to fetch resource info
            Supported functions: pod.get_all_pvcs(), pod.get_all_pods()
        previous_num (int): Previous number of pods/PVCs for comparison
        namespace (str): Name of the namespace
        change_type (str): Type of change to check. Accepted values are
            'increase' and 'decrease'. Default is 'increase'.
        min_difference (int): Minimum required difference in PVC/pod count
        timeout (int): Maximum wait time in seconds
        interval (int): Time in seconds to wait between consecutive checks

    Returns:
        bool: True if difference in count is greater than or equal to
            'min_difference'. False in case of timeout.
    """
    try:
        for sample in TimeoutSampler(timeout, interval, func_to_use,
                                     namespace):
            if func_to_use == pod.get_all_pods:
                current_num = len(sample)
            else:
                current_num = len(sample['items'])

            if change_type == 'increase':
                count_diff = current_num - previous_num
            else:
                count_diff = previous_num - current_num
            if count_diff >= min_difference:
                return True
    except TimeoutExpiredError:
        return False
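A hypothetical call that waits for a pod scale-up, using the pod.get_all_pods helper documented in the Args section above; the counts and namespace are made up:

    # Hypothetical usage: wait up to 60 s for at least 3 new pods to appear
    # in 'my-namespace' on top of the 5 counted earlier.
    scaled_up = wait_for_resource_count_change(
        func_to_use=pod.get_all_pods,
        previous_num=5,
        namespace='my-namespace',
        change_type='increase',
        min_difference=3,
        timeout=60,
        interval=5,
    )
    assert scaled_up, "Pod count did not increase by 3 within 60 seconds"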
Example #20
    def test_smallfile_workload(self, ripsaw):
        """
        Run SmallFile Workload
        """
        log.info("Apply Operator CRD")
        ripsaw.apply_crd('resources/crds/ripsaw_v1alpha1_ripsaw_crd.yaml')

        log.info("Running SmallFile bench")
        sf_data = templating.load_yaml_to_dict(
            constants.SMALLFILE_BENCHMARK_YAML)
        sf_obj = OCS(**sf_data)
        sf_obj.create()
        # wait for benchmark pods to get created - takes a while
        for bench_pod in TimeoutSampler(40, 3, get_pod_name_by_pattern,
                                        'smallfile-client', 'my-ripsaw'):
            try:
                if bench_pod[0] is not None:
                    small_file_client_pod = bench_pod[0]
                    break
            except IndexError:
                log.info("Bench pod not ready yet")

        bench_pod = OCP(kind='pod', namespace='my-ripsaw')
        log.info("Waiting for SmallFile benchmark to Run")
        assert bench_pod.wait_for_resource(condition=constants.STATUS_RUNNING,
                                           resource_name=small_file_client_pod,
                                           sleep=30,
                                           timeout=600)
        start_time = time.time()
        timeout = 900
        while True:
            logs = bench_pod.exec_oc_cmd(f'logs {small_file_client_pod}',
                                         out_yaml_format=False)
            if "RUN STATUS DONE" in logs:
                log.info("SmallFile Benchmark Completed Successfully")
                break

            if timeout < (time.time() - start_time):
                raise TimeoutError(
                    "Timed out waiting for benchmark to complete")
            time.sleep(30)
Example #21
    def test_multiregion_bucket_creation(self, mcg_obj,
                                         multiregion_mirror_setup):
        """
        Test bucket creation using the S3 SDK
        """

        mirrored_bucket_name = multiregion_mirror_setup[0].name
        system_bucket, mirror_tier_name, mirror_attached_pools = (None, ) * 3

        # Make sure that the bucket is up and running
        try:
            for resp in TimeoutSampler(30, 3, mcg_obj.s3_get_all_bucket_names):
                if mirrored_bucket_name in resp:
                    break
                else:
                    logger.info(
                        f"Did not yet find mirrored bucket {mirrored_bucket_name}"
                    )
        except TimeoutExpiredError:
            logger.error(f"Could not find bucket {mirrored_bucket_name}")
            assert False

        # Retrieve the NooBaa system information
        system_state = (mcg_obj.send_rpc_query(
            "system_api", "read_system").json().get("reply"))

        # Retrieve the correct bucket's tier name
        for bucket in system_state.get("buckets"):
            if bucket.get("name") == mirrored_bucket_name:
                mirror_tier_name = bucket.get("tiering").get("tiers")[0].get(
                    "tier")
                break

        # Retrieve the pools attached to the tier
        for tier in system_state.get("tiers"):
            if tier.get("name") == mirror_tier_name:
                mirror_attached_pools = tier.get("attached_pools")
                break

        assert (len(mirror_attached_pools) == 2
                ), "Multiregion bucket did not have two backingstores attached"
Example #22
def add_new_node_and_label_upi(node_type, num_nodes, mark_for_ocs_label=True):
    """
    Add a new node for aws/vmware upi platform and label it

    Args:
        node_type (str): Type of node, RHEL or RHCOS
        num_nodes (int): number of nodes to add
        mark_for_ocs_label (bool): True if label the new node

    Returns:
        bool: True if node addition completed successfully

    """

    initial_nodes = tests.helpers.get_worker_nodes()
    from ocs_ci.ocs.platform_nodes import PlatformNodesFactory
    plt = PlatformNodesFactory()
    node_util = plt.get_nodes_platform()
    node_util.create_and_attach_nodes_to_cluster({}, node_type, num_nodes)
    for sample in TimeoutSampler(timeout=600,
                                 sleep=6,
                                 func=tests.helpers.get_worker_nodes):
        if len(sample) == len(initial_nodes) + num_nodes:
            break

    nodes_after_exp = tests.helpers.get_worker_nodes()
    wait_for_nodes_status(node_names=tests.helpers.get_worker_nodes(),
                          status=constants.NODE_READY)

    new_spun_nodes = list(set(nodes_after_exp) - set(initial_nodes))
    if node_type == constants.RHEL_OS:
        set_selinux_permissions(workers=new_spun_nodes)

    if mark_for_ocs_label:
        node_obj = ocp.OCP(kind='node')
        for new_spun_node in new_spun_nodes:
            node_obj.add_label(resource_name=new_spun_node,
                               label=constants.OPERATOR_NODE_LABEL)
            logging.info(
                f"Successfully labeled {new_spun_node} with OCS storage label")
    return True
Example #23
def wait_for_pv_backingstore_resource_deleted(backingstore_name,
                                              namespace=None):
    """
    wait for pv backing store resources to be deleted at the end of test teardown

    Args:
        backingstore_name (str): backingstore name
        namespace (str): backing store's namespace

    """
    namespace = namespace or config.ENV_DATA["cluster_namespace"]
    sample = TimeoutSampler(
        timeout=120,
        sleep=15,
        func=check_resources_deleted,
        backingstore_name=backingstore_name,
        namespace=namespace,
    )
    if not sample.wait_for_func_status(result=True):
        log.error(f"Unable to delete resources of {backingstore_name}")
        raise TimeoutExpiredError
Example #24
    def setup_quay_operator(self):
        """
        Deploys Quay operator

        """
        quay_operator_data = templating.load_yaml(file=constants.QUAY_SUB)
        self.quay_operator_csv = quay_operator_data["spec"]["startingCSV"]
        self.quay_operator = OCS(**quay_operator_data)
        logger.info(f"Installing Quay operator: {self.quay_operator.name}")
        self.quay_operator.create()
        for quay_pod in TimeoutSampler(
            300, 10, get_pod_name_by_pattern, constants.QUAY_OPERATOR, self.namespace
        ):
            if quay_pod:
                self.quay_pod_obj.wait_for_resource(
                    condition=constants.STATUS_RUNNING,
                    resource_name=quay_pod[0],
                    sleep=30,
                    timeout=600,
                )
                break
Example #25
def wait_for_cache(mcg_obj, bucket_name, expected_objects_names=None):
    """
    wait for existing cache bucket to cache all required objects

    Args:
        mcg_obj (MCG): An MCG object containing the MCG S3 connection credentials
        bucket_name (str): Name of the cache bucket
        expected_objects_names (list): Expected objects to be cached

    """
    sample = TimeoutSampler(
        timeout=60,
        sleep=10,
        func=check_cached_objects_by_name,
        mcg_obj=mcg_obj,
        bucket_name=bucket_name,
        expected_objects_names=expected_objects_names,
    )
    if not sample.wait_for_func_status(result=True):
        logger.error("Objects were not able to cache properly")
        raise UnexpectedBehaviour
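A hypothetical call site for wait_for_cache(); mcg stands for an MCG connection object, and the bucket and object names are made up:

    # Hypothetical usage: verify that both uploaded objects were cached in
    # the cache bucket within the sampler's 60-second window.
    wait_for_cache(
        mcg_obj=mcg,
        bucket_name="my-cache-bucket",
        expected_objects_names=["obj-1", "obj-2"],
    )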
Example #26
    def kill(self, node, timeout):
        """
        Kill the given service using systemctl.

        Args:
            node (object): Node objects
            timeout (int): time in seconds to wait for service to be stopped.

        """
        nodeip = self.nodes[node.name]
        cmd = f"ssh core@{nodeip} sudo systemctl kill {self.service_name}.service"
        result = exec_cmd(cmd)
        sampler = TimeoutSampler(
            timeout=timeout,
            sleep=3,
            func=self.verify_service,
            node=node,
            action=INACTIVE,
        )
        # Run the sampler; without this call the service state is never
        # polled and 'ret' would just be the sampler object itself.
        ret = sampler.wait_for_func_status(result=True)
        logger.info(
            f"Result of kill of service {self.service_name} is {result}-{ret}")
Example #27
    def wait_for_state(self, state, timeout=480, sleep=5):
        """
        Wait until the state of the catalog source resource matches the
        desired state passed in the state parameter.

        Args:
            state (str): Desired state of catalog source object
            timeout (int): Timeout in seconds to wait for desired state
            sleep (int): Time in seconds to sleep between attempts

        Raises:
            ResourceWrongStatusException: In case the catalog source is not in
                expected state.

        """
        self.check_name_is_specified()
        sampler = TimeoutSampler(timeout, sleep, self.check_state, state=state)
        if not sampler.wait_for_func_status(True):
            raise ResourceWrongStatusException(
                f"Catalog source: {self.resource_name} is not in expected "
                f"state: {state}")
Example #28
def verify_block_pool_exists(pool_name):
    """
    Verify that a Ceph block pool exists

    Args:
        pool_name (str): The name of the Ceph block pool

    Returns:
        bool: True if the Ceph block pool exists, False otherwise
    """
    logger.info(f"Verifying that block pool {pool_name} exists")
    ct_pod = pod.get_ceph_tools_pod()
    try:
        for pools in TimeoutSampler(60, 3, ct_pod.exec_ceph_cmd,
                                    'ceph osd lspools'):
            logger.info(f'POOLS are {pools}')
            for pool in pools:
                if pool_name in pool.get('poolname'):
                    return True
    except TimeoutExpiredError:
        return False
Example #29
    def run(self):
        """
        Run the benchmark and wait until it completes

        """
        # Create the benchmark object
        self.sf_obj = OCS(**self.crd_data)
        self.sf_obj.create()

        # Wait for benchmark pods to get created - takes a while
        for bench_pod in TimeoutSampler(
                240,
                10,
                get_pod_name_by_pattern,
                "smallfile-client",
                benchmark_operator.BMO_NAME,
        ):
            try:
                if bench_pod[0] is not None:
                    small_file_client_pod = bench_pod[0]
                    break
            except IndexError:
                log.info("Bench pod not ready yet")

        bench_pod = OCP(kind="pod", namespace=benchmark_operator.BMO_NAME)
        log.info("Waiting for SmallFile benchmark to Run")
        assert bench_pod.wait_for_resource(
            condition=constants.STATUS_RUNNING,
            resource_name=small_file_client_pod,
            sleep=30,
            timeout=600,
        )
        log.info("The SmallFiles benchmark is running, wait for completion")
        bench_pod.wait_for_resource(
            condition=constants.STATUS_COMPLETED,
            resource_name=small_file_client_pod,
            timeout=18000,
            sleep=60,
        )
        log.info("The SmallFiles benchmark is completed")
Example #30
File: csr.py Project: wusui/ocs-ci
def wait_for_all_nodes_csr_and_approve(timeout=900,
                                       sleep=10,
                                       expected_node_num=None):
    """
    Wait for CSR to generate for nodes

    Args:
        timeout (int): Time in seconds to wait
        sleep (int): Sampling time in seconds
        expected_node_num (int): Number of nodes to verify CSR is generated

    Returns:
         None: returns once CSRs for all expected nodes have been generated
             and approved

    Raises:
        TimeoutExpiredError: in case CSR not found

    """
    if not expected_node_num:
        # expected number of nodes is total of master, worker nodes and
        # bootstrapper node
        expected_node_num = (config.ENV_DATA['master_replicas'] +
                             config.ENV_DATA['worker_replicas'] + 1)
    for csr_nodes in TimeoutSampler(timeout=timeout,
                                    sleep=sleep,
                                    func=get_nodes_csr):
        logger.debug(f"CSR data: {csr_nodes}")
        if len(csr_nodes.keys()) == expected_node_num:
            logger.info(f"CSR generated for all {expected_node_num} nodes")
            approve_pending_csr()
            return
        logger.warning(
            f"Some nodes have not generated CSRs. Expected"
            f" {expected_node_num} but found {len(csr_nodes.keys())} CSRs. "
            f"Retrying again")
        # approve the pending CSRs here since newly added nodes will not
        # generate CSR till existing CSRs are approved
        pending_csrs = get_pending_csr()
        if pending_csrs:
            approve_csrs(pending_csrs)