def test_retry_any(self):
    r = tenacity.retry_any(
        tenacity.retry_if_result(lambda x: x == 1),
        tenacity.retry_if_result(lambda x: x == 2))
    self.assertTrue(r(tenacity.Future.construct(1, 1, False)))
    self.assertTrue(r(tenacity.Future.construct(1, 2, False)))
    self.assertFalse(r(tenacity.Future.construct(1, 3, False)))
    self.assertFalse(r(tenacity.Future.construct(1, 1, True)))
def retry_request(method, prepared_req, session, max_retries=10):
    retry_conditions = None

    def _make_request(req, sess):
        """send the prepared session request"""
        response = sess.send(req)
        return response

    def _return_last_value(retry_state):
        """return the result of the last call attempt
        and let code pick up the error"""
        return retry_state.outcome.result()

    if method.upper() == 'POST':
        retry_conditions = retry_if_result(
            lambda res: res.status_code in civis.civis.POST_RETRY_CODES)
    elif method.upper() in civis.civis.RETRY_VERBS:
        retry_conditions = retry_if_result(
            lambda res: res.status_code in civis.civis.RETRY_CODES)

    if retry_conditions:
        retry_config = Retrying(
            retry=retry_conditions,
            wait=wait_for_retry_after_header(
                fallback=wait_random_exponential(multiplier=2, max=60)),
            stop=(stop_after_delay(600) | stop_after_attempt(max_retries)),
            retry_error_callback=_return_last_value,
        )
        response = retry_config(_make_request, prepared_req, session)
        return response

    response = _make_request(prepared_req, session)
    return response
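# A hedged sketch of the Retry-After-aware wait strategy used above; the
# real `wait_for_retry_after_header` lives in the civis client and may
# differ. Assumes the previous attempt produced a requests.Response whose
# Retry-After header, when present, holds an integer number of seconds.
from tenacity.wait import wait_base


class wait_for_retry_after_header(wait_base):
    """Wait the seconds given by the Retry-After header, if present;
    otherwise defer to a fallback wait strategy."""

    def __init__(self, fallback):
        self.fallback = fallback

    def __call__(self, retry_state):
        outcome = retry_state.outcome
        if outcome is not None and not outcome.failed:
            response = outcome.result()
            retry_after = getattr(response, 'headers', {}).get('Retry-After')
            if retry_after is not None:
                try:
                    return int(retry_after)
                except ValueError:
                    pass
        # No usable header on this attempt: fall back to random exponential.
        return self.fallback(retry_state)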
def test_retry_or(self):
    r = (tenacity.retry_if_result(lambda x: x == "foo") |
         tenacity.retry_if_result(lambda x: isinstance(x, int)))
    self.assertTrue(r(tenacity.Future.construct(1, "foo", False)))
    self.assertFalse(r(tenacity.Future.construct(1, "foobar", False)))
    self.assertFalse(r(tenacity.Future.construct(1, 2.2, False)))
    self.assertFalse(r(tenacity.Future.construct(1, 42, True)))
def test_retry_and(self):
    r = (tenacity.retry_if_result(lambda x: x == 1) &
         tenacity.retry_if_result(lambda x: isinstance(x, int)))
    self.assertTrue(r(tenacity.Future.construct(1, 1, False)))
    self.assertFalse(r(tenacity.Future.construct(1, 2, False)))
    self.assertFalse(r(tenacity.Future.construct(1, 3, False)))
    self.assertFalse(r(tenacity.Future.construct(1, 1, True)))
def test_retry_and(self):
    retry = (tenacity.retry_if_result(lambda x: x == 1) &
             tenacity.retry_if_result(lambda x: isinstance(x, int)))

    def r(fut):
        retry_state = make_retry_state(1, 1.0, last_result=fut)
        return retry(retry_state)

    self.assertTrue(r(tenacity.Future.construct(1, 1, False)))
    self.assertFalse(r(tenacity.Future.construct(1, 2, False)))
    self.assertFalse(r(tenacity.Future.construct(1, 3, False)))
    self.assertFalse(r(tenacity.Future.construct(1, 1, True)))
def test_retry_or(self):
    retry = (tenacity.retry_if_result(lambda x: x == "foo") |
             tenacity.retry_if_result(lambda x: isinstance(x, int)))

    def r(fut):
        retry_state = make_retry_state(1, 1.0, last_result=fut)
        return retry(retry_state)

    self.assertTrue(r(tenacity.Future.construct(1, "foo", False)))
    self.assertFalse(r(tenacity.Future.construct(1, "foobar", False)))
    self.assertFalse(r(tenacity.Future.construct(1, 2.2, False)))
    self.assertFalse(r(tenacity.Future.construct(1, 42, True)))
def wait_for_valid_status(envelope_url, http_requests):
    """Check the status of the submission.

    Retry until the status is "Valid", or if there is an error with the
    request to get the submission envelope.

    Args:
        envelope_url (str): Submission envelope url
        http_requests (HttpRequests): HttpRequests object

    Returns:
        str: Status of the submission ("Valid", "Validating", etc.)

    Raises:
        requests.HTTPError: if 4xx error or 5xx error past timeout
        tenacity.RetryError: if status is invalid past timeout
    """
    def log_before(envelope_url):
        now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        print('{0} Getting status for {1}'.format(now, envelope_url))

    def keep_polling(response):
        # Keep polling until the status is "Valid"/"Complete" or "Invalid"
        envelope_js = response.json()
        status = envelope_js.get('submissionState')
        print('submissionState: {}'.format(status))
        return status not in ('Valid', 'Complete', 'Invalid')

    response = http_requests.get(
        envelope_url,
        before=log_before(envelope_url),
        retry=retry_if_result(keep_polling),
    )
    return response.json()
def retry_upon_none_result(max_attempts, delay=0.5, max_delay=2):
    return tenacity.retry(reraise=True,
                          retry=tenacity.retry_if_result(lambda x: x is None),
                          wait=tenacity.wait_exponential(
                              multiplier=delay, max=max_delay),
                          stop=tenacity.stop_after_attempt(max_attempts),
                          before=_log_before_retry, after=_log_after_retry)
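# Hedged usage sketch for a None-result retry like the decorator factory
# above: poll a lookup that returns None until the backend has the data.
# `fetch_record` and the in-memory store are illustrative only.
import tenacity

_store = {}


@tenacity.retry(reraise=True,
                retry=tenacity.retry_if_result(lambda x: x is None),
                wait=tenacity.wait_exponential(multiplier=0.5, max=2),
                stop=tenacity.stop_after_attempt(5))
def fetch_record(key):
    # Returns None while the record has not appeared yet.
    return _store.get(key)


_store['router-1'] = {'status': 'ACTIVE'}
assert fetch_record('router-1') == {'status': 'ACTIVE'}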
def batch_wait(self, tasks, **retry_kwargs):
    """Wait until a list of tasks is completed. Expires after 'timeout'
    seconds.

    Returns a tuple of lists (pending_tasks, success_tasks, error_tasks).
    Each list contains a pair (original_position, task) sorted by
    original_position ascending. original_position gives the original
    index in the input tasks list parameter; this helps to keep the order.
    """
    retry_kwargs['timeout'] = retry_kwargs.get('timeout', 300)
    try:
        positions = {}
        pending_tasks = []
        for pos, task in enumerate(tasks):
            positions[task.pk] = pos
            pending_tasks.append((pos, task))
        success_tasks = []
        error_tasks = []
        functor = functools.partial(self._refresh_tasks_status,
                                    pending_tasks, success_tasks,
                                    error_tasks, positions)
        retry_get_tasks(functor, retry_if_result(has_pending_tasks),
                        **retry_kwargs)
    except RetryError:
        pass
    return (sorted(pending_tasks, key=lambda v: v[0]),
            sorted(success_tasks, key=lambda v: v[0]),
            sorted(error_tasks, key=lambda v: v[0]))
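# Hedged sketch of the `has_pending_tasks` predicate assumed by
# `batch_wait` above; the real module supplies its own version. It assumes
# the refresh callable returns the still-pending list, so the retry loop
# continues while anything remains in it.
def has_pending_tasks(pending_tasks):
    return len(pending_tasks) > 0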
def sync(cnxt, entity_id, current_traversal, is_update, propagate,
         predecessors, new_data):
    # Retry waits up to 60 seconds at most, with exponentially increasing
    # amounts of jitter per resource still outstanding
    wait_strategy = tenacity.wait_random_exponential(max=60)

    def init_jitter(existing_input_data):
        nconflicts = max(0, len(predecessors) - len(existing_input_data) - 1)
        # 10ms per potential conflict, up to a max of 10s in total
        return min(nconflicts, 1000) * 0.01

    @tenacity.retry(
        retry=tenacity.retry_if_result(lambda r: r is None),
        wait=wait_strategy
    )
    def _sync():
        sync_point = get(cnxt, entity_id, current_traversal, is_update)
        input_data = deserialize_input_data(sync_point.input_data)
        wait_strategy.multiplier = init_jitter(input_data)
        input_data.update(new_data)
        rows_updated = update_input_data(
            cnxt, entity_id, current_traversal, is_update,
            sync_point.atomic_key, serialize_input_data(input_data))
        return input_data if rows_updated else None

    input_data = _sync()

    waiting = predecessors - set(input_data)
    key = make_key(entity_id, current_traversal, is_update)
    if waiting:
        LOG.debug('[%s] Waiting %s: Got %s; still need %s',
                  key, entity_id, _dump_list(input_data),
                  _dump_list(waiting))
    else:
        LOG.debug('[%s] Ready %s: Got %s',
                  key, entity_id, _dump_list(input_data))
        propagate(entity_id, serialize_input_data(input_data))
def run(envelope_url, http_requests):
    """Check the contents of the submission envelope for the upload urn.

    Retry until the envelope contains an upload urn, or if there is an
    error with the request.

    Args:
        http_requests (HttpRequests): an HttpRequests object.
        envelope_url (str): the submission envelope url

    Returns:
        String giving the upload urn in the format
        dcp:upl:aws:integration:12345:abcde

    Raises:
        requests.HTTPError: for 4xx errors or 5xx errors beyond timeout
        tenacity.RetryError: if urn is missing beyond timeout
    """
    def urn_is_none(response):
        envelope_js = response.json()
        urn = get_upload_urn(envelope_js)
        now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        print('{0} Upload urn: {1}'.format(now, urn))
        return urn is None

    response = http_requests.get(envelope_url,
                                 retry=retry_if_result(urn_is_none))
    urn = get_upload_urn(response.json())
    return urn
def batch_wait(self, tasks, timeout=300, wait_exp_multiplier=0.05,
               wait_exp_max=1.0):
    """Wait until a list of tasks is completed. Expires after 'timeout'
    seconds.

    Returns a tuple of lists (pending_tasks, success_tasks, error_tasks).
    Each list contains a pair (original_position, task) sorted by
    original_position ascending. original_position gives the original
    index in the input tasks list parameter; this helps to keep the order.
    """
    try:
        positions = {}
        pending_tasks = []
        for pos, task in enumerate(tasks):
            positions[task.pk] = pos
            pending_tasks.append((pos, task))
        success_tasks = []
        error_tasks = []
        retryer = Retrying(
            wait=wait_random_exponential(multiplier=wait_exp_multiplier,
                                         max=wait_exp_max),
            stop=stop_after_delay(timeout),
            retry=retry_if_result(has_pending_tasks),
            before=before_log(logger, logging.DEBUG),
            after=after_log(logger, logging.DEBUG))
        retryer(self._refresh_tasks_status, pending_tasks, success_tasks,
                error_tasks, positions)
    except RetryError:
        pass
    return (sorted(pending_tasks, key=lambda v: v[0]),
            sorted(success_tasks, key=lambda v: v[0]),
            sorted(error_tasks, key=lambda v: v[0]))
def __init__(self, configuration=None, header_name=None, header_value=None,
             cookie=None, pool_threads=1):
    if configuration is None:
        configuration = Configuration()
    self.configuration = configuration
    self.pool_threads = pool_threads

    # NOTE: `and` between retry predicates short-circuits to the right-hand
    # operand (predicate objects are always truthy), so only the
    # exception-type conditions take effect here; tenacity predicates are
    # composed with `&` / `|`, not the boolean operators.
    self.retrying = tenacity.Retrying(
        stop=tenacity.stop_after_attempt(configuration.retry_count),
        wait=tenacity.wait_random_exponential(
            multiplier=configuration.back_off,
            max=configuration.retry_max_delay,
            min=configuration.retry_delay),
        retry=(tenacity.retry_if_result(self.is_retry_enabled) and
               (tenacity.retry_if_exception_type(RetryableException) |
                tenacity.retry_if_exception_type(HTTPError))))
    self.rest_client = rest.RESTClientObject(configuration,
                                             retrying=self.retrying)
    self.default_headers = {}
    if header_name is not None:
        self.default_headers[header_name] = header_value
    self.cookie = cookie
    # Set default User-Agent.
    self.user_agent = 'opsgenie-sdk-python-2.0.2'

    # init metric publishers
    self.http_metric_publisher = self.rest_client.http_metric
    self.api_metric_publisher = metrics.ApiMetric('ApiMetricPublisher')
    self.sdk_metric_publisher = metrics.SdkMetric('SdkMetricPublisher')
def wait_until_complete(
    self, package_id: str, spin_cb: Callable = None
) -> transfer_service_api.request_response_pb2.ReadResponse:
    """Blocks until processing of a package has completed."""

    def _should_continue(
        resp: transfer_service_api.request_response_pb2.ReadResponse,
    ):
        return (resp.status == transfer_service_api.request_response_pb2.
                PACKAGE_STATUS_PROCESSING)

    def _callback(retry_state):
        if spin_cb is not None:
            spin_cb(retry_state)

    @tenacity.retry(
        wait=tenacity.wait_fixed(1),
        retry=tenacity.retry_if_result(_should_continue),
        after=_callback,
    )
    def _poll():
        """Retries while the package is processing."""
        return self.read(package_id)

    return _poll()
def _blocking_poll(self, timeout=None):
    """Poll and wait for the Future to be resolved.

    Args:
        timeout (int): How long to wait for the operation to complete.
            If None, wait indefinitely.
    """
    if self._result_set:
        return

    retry_on = tenacity.retry_if_result(
        functools.partial(operator.is_not, True))
    # Use exponential backoff with jitter.
    wait_on = (tenacity.wait_exponential(multiplier=1, max=10) +
               tenacity.wait_random(0, 1))

    if timeout is None:
        retry = tenacity.retry(retry=retry_on, wait=wait_on)
    else:
        retry = tenacity.retry(retry=retry_on, wait=wait_on,
                               stop=tenacity.stop_after_delay(timeout))

    try:
        retry(self.done)()
    except tenacity.RetryError as exc:
        six.raise_from(
            concurrent.futures.TimeoutError(
                'Operation did not complete within the designated '
                'timeout.'),
            exc)
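# The retry predicate above keeps retrying while `done()` returns anything
# other than exactly True. A quick self-contained check of the partial:
import functools
import operator

not_yet_done = functools.partial(operator.is_not, True)
assert not_yet_done(False) is True   # still retrying
assert not_yet_done(None) is True    # still retrying
assert not_yet_done(True) is False   # done() returned True: stop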
def sync(cnxt, entity_id, current_traversal, is_update, propagate,
         predecessors, new_data):
    # Retry waits up to 60 seconds at most, with exponentially increasing
    # amounts of jitter per resource still outstanding
    wait_strategy = tenacity.wait_random_exponential(max=60)

    def init_jitter(existing_input_data):
        nconflicts = max(0, len(predecessors) - len(existing_input_data) - 1)
        # 10ms per potential conflict, up to a max of 10s in total
        return min(nconflicts, 1000) * 0.01

    @tenacity.retry(retry=tenacity.retry_if_result(lambda r: r is None),
                    wait=wait_strategy)
    def _sync():
        sync_point = get(cnxt, entity_id, current_traversal, is_update)
        input_data = deserialize_input_data(sync_point.input_data)
        wait_strategy.multiplier = init_jitter(input_data)
        input_data.update(new_data)
        rows_updated = update_input_data(cnxt, entity_id, current_traversal,
                                         is_update, sync_point.atomic_key,
                                         serialize_input_data(input_data))
        return input_data if rows_updated else None

    input_data = _sync()

    waiting = predecessors - set(input_data)
    key = make_key(entity_id, current_traversal, is_update)
    if waiting:
        LOG.debug('[%s] Waiting %s: Got %s; still need %s',
                  key, entity_id, _dump_list(input_data),
                  _dump_list(waiting))
    else:
        LOG.debug('[%s] Ready %s: Got %s',
                  key, entity_id, _dump_list(input_data))
        propagate(entity_id, serialize_input_data(input_data))
def _retry(func, retry_if_result=is_false, **kwargs):
    kwargs.setdefault('retry', tenacity.retry_if_result(retry_if_result))
    kwargs.setdefault('wait', tenacity.wait_fixed(1))
    kwargs.setdefault('stop', tenacity.stop_after_delay(10))
    decorator = tenacity.retry(**kwargs)
    decorated_func = decorator(func)
    return decorated_func()
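# Hedged sketch of the `is_false` default predicate assumed by `_retry`
# above, plus a self-contained call whose probe succeeds on the third
# attempt. The probe is illustrative only.
import itertools

import tenacity


def is_false(value):
    return value is False


_calls = itertools.count(1)


def _probe():
    return next(_calls) >= 3


assert tenacity.retry(
    retry=tenacity.retry_if_result(is_false),
    wait=tenacity.wait_fixed(0.01),
    stop=tenacity.stop_after_delay(10),
)(_probe)() is True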
def wrapped(*args, **kwargs):
    self = args[0]
    new_fn = tenacity.retry(
        reraise=True,
        retry=tenacity.retry_if_result(_ovsdb_result_pending),
        wait=tenacity.wait_exponential(multiplier=0.01, max=1),
        stop=tenacity.stop_after_delay(self.vsctl_timeout))(fn)
    return new_fn(*args, **kwargs)
def wait_till_jobs_complete(self, node, timeout=3600):
    name = node.get('metadata', {}).get('name')
    retryer = tenacity.Retrying(
        stop=tenacity.stop_after_delay(timeout),
        retry=tenacity.retry_if_result(
            lambda result: len(result.get('items', [])) != 0),
        wait=tenacity.wait_fixed(5))
    retryer(self._get_job_pods_in_node, name, "Running")
def wait_for(func, timeout=constants.timeouts.TWO_MIN_USER_WAIT):
    """Waits for function to return a truthy value."""
    def is_falsy(value):
        """Return whether value is falsy (None or False)."""
        return not value
    return tenacity.Retrying(stop=tenacity.stop_after_delay(timeout),
                             retry=tenacity.retry_if_result(is_falsy))(func)
def wait_for(func, timeout=constants.ux.TWO_MIN_USER_WAIT):
    """Waits for function to return a truthy value."""
    def is_falsy(value):
        """Return whether value is falsy (None or False)."""
        return not value
    return tenacity.Retrying(
        stop=tenacity.stop_after_delay(timeout),
        retry=tenacity.retry_if_result(is_falsy))(func)
def wait_for(func):
    """Waits for function to return a truthy value."""
    def is_falsy(value):
        """Return whether value is falsy (None or False)."""
        return not value
    return tenacity.Retrying(
        stop=tenacity.stop_after_delay(constants.ux.MAX_USER_WAIT_SECONDS),
        retry=tenacity.retry_if_result(is_falsy))(func)
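# Hedged usage sketch for the `wait_for` helpers above: poll until a
# condition turns truthy. The deadline-based condition is illustrative.
import time

import tenacity

_deadline = time.monotonic() + 0.2


def condition():
    return time.monotonic() >= _deadline


tenacity.Retrying(
    stop=tenacity.stop_after_delay(10),
    retry=tenacity.retry_if_result(lambda value: not value))(condition)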
def invoke(self):
    retryer = Retrying(
        retry=(retry_if_exception_type(requests.exceptions.Timeout)
               | retry_if_exception_type(requests.exceptions.ConnectionError)
               | retry_if_result(self.retry_if_we_need_to)),
        stop=stop_after_delay(self.max_time),
        wait=wait_exponential(multiplier=self.delay, max=self.max_delay),
    )
    return retryer(self.post)
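# The `|` chaining above composes predicates into a tenacity.retry_any;
# a quick self-contained check of that equivalence:
import tenacity

combined = (tenacity.retry_if_exception_type(ValueError)
            | tenacity.retry_if_result(lambda r: r is None))
assert isinstance(combined, tenacity.retry_any)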
def wrapped(*args, **kwargs):
    self = args[0]
    new_fn = tenacity.retry(
        reraise=True,
        retry=tenacity.retry_if_result(_ofport_result_pending),
        wait=tenacity.wait_exponential(multiplier=0.01, max=1),
        stop=tenacity.stop_after_delay(self.vsctl_timeout))(fn)
    return new_fn(*args, **kwargs)
def tear_down_agent(self, task):
    """A deploy step to tear down the agent.

    Shuts down the machine and removes it from the provisioning network.

    :param task: a TaskManager object containing the node
    """
    wait = CONF.ansible.post_deploy_get_power_state_retry_interval
    attempts = CONF.ansible.post_deploy_get_power_state_retries + 1

    @tenacity.retry(stop=tenacity.stop_after_attempt(attempts),
                    retry=(tenacity.retry_if_result(
                        lambda state: state != states.POWER_OFF) |
                        tenacity.retry_if_exception_type(Exception)),
                    wait=tenacity.wait_fixed(wait),
                    reraise=True)
    def _wait_until_powered_off(task):
        return task.driver.power.get_power_state(task)

    node = task.node
    oob_power_off = strutils.bool_from_string(
        node.driver_info.get('deploy_forces_oob_reboot', False))
    try:
        if not oob_power_off:
            try:
                node_address = _get_node_ip(task)
                playbook, user, key = _parse_ansible_driver_info(
                    node, action='shutdown')
                node_list = [(node.uuid, node_address, user, node.extra)]
                extra_vars = _prepare_extra_vars(node_list)
                _run_playbook(node, playbook, extra_vars, key)
                _wait_until_powered_off(task)
            except Exception as e:
                LOG.warning('Failed to soft power off node %(node_uuid)s '
                            'in at least %(timeout)d seconds. '
                            'Error: %(error)s',
                            {'node_uuid': node.uuid,
                             'timeout': (wait * (attempts - 1)) / 1000,
                             'error': e})
                # NOTE(pas-ha) flush is a part of deploy playbook
                # so if it finished successfully we can safely
                # power off the node out-of-band
                manager_utils.node_power_action(task, states.POWER_OFF)
        else:
            manager_utils.node_power_action(task, states.POWER_OFF)
    except Exception as e:
        msg = (_('Error rebooting node %(node)s after deploy. '
                 'Error: %(error)s') % {'node': node.uuid, 'error': e})
        agent_base.log_and_raise_deployment_error(task, msg)
def _retry_method(self, target, listener_type, method):
    wait_timeout = self.conf.matchmaker_redis.wait_timeout / 1000.
    check_timeout = self.conf.matchmaker_redis.check_timeout / 1000.

    @tenacity.retry(retry=tenacity.retry_if_result(is_empty),
                    wait=tenacity.wait_fixed(wait_timeout),
                    stop=tenacity.stop_after_delay(check_timeout))
    def _get_hosts_retry(target, listener_type):
        return method(target, listener_type)

    return _get_hosts_retry(target, listener_type)
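# Hedged sketch of the `is_empty` predicate assumed by `_retry_method`
# above: keep retrying the matchmaker lookup while it yields no hosts.
def is_empty(hosts):
    return not hosts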
def retry_air_exists(self, whether_retry=True, sleeps=1.5, max_attempts=3,
                     threshold=None):
    """Retryable exists.

    Args:
        threshold: match threshold
        self: Img Template
        whether_retry: whether to retry
        sleeps: time between retries
        max_attempts: max retry times

    Returns:
        pos or False
    """
    with allure.step("Detect UI image: {}".format(str(self))):
        if not whether_retry:
            max_attempts = 1

        def retry_exists():
            try:
                logging.debug("img template threshold: {}".format(
                    self.threshold))
                pos = loop_find(self, timeout=ST.FIND_TIMEOUT_TMP,
                                threshold=threshold)
            except TargetNotFoundError:
                return False
            else:
                return pos

        def need_retry(value):
            """Retry when value is False.

            Args:
                value: the function's return value, filled in automatically
            """
            logging.debug("need retry aircv exists?: {}".format(
                value is False))
            return value is False

        r = Retrying(retry=retry_if_result(need_retry),
                     stop=stop_after_attempt(max_attempts),
                     wait=wait_fixed(sleeps),
                     before_sleep=my_before_sleep)
        res = None
        try:
            # Pass only the callable; the original `r(r, retry_exists)`
            # mistakenly passed the Retrying object itself as the function.
            res = r(retry_exists)
        except RetryError:
            res = False
        finally:
            logging.info("aircv find {}: {}".format(
                str(self), False if res is False else res))
            logging.debug("retry aircv exists statistics: {}".format(
                str(r.statistics)))
            allure.attach.file(
                self.filepath,
                name="aircv find img result: {}, img:".format(
                    False if res is False else res),
                attachment_type=allure.attachment_type.PNG)
        return res
def retry_upon_none_result(max_attempts, delay=0.5, max_delay=2,
                           random=False):
    # Pick the jittered strategy when random is requested; the original
    # had these two branches swapped.
    if random:
        wait_func = tenacity.wait_random_exponential(
            multiplier=delay, max=max_delay)
    else:
        wait_func = tenacity.wait_exponential(
            multiplier=delay, max=max_delay)
    return tenacity.retry(reraise=True,
                          retry=tenacity.retry_if_result(lambda x: x is None),
                          wait=wait_func,
                          stop=tenacity.stop_after_attempt(max_attempts),
                          before=_log_before_retry, after=_log_after_retry)
def retry_upon_none_result(max_attempts, delay=0.5, max_delay=2,
                           random=False):
    # As above: jittered wait when random=True, plain exponential otherwise.
    if random:
        wait_func = tenacity.wait_random_exponential(multiplier=delay,
                                                     max=max_delay)
    else:
        wait_func = tenacity.wait_exponential(multiplier=delay,
                                              max=max_delay)
    return tenacity.retry(reraise=True,
                          retry=tenacity.retry_if_result(lambda x: x is None),
                          wait=wait_func,
                          stop=tenacity.stop_after_attempt(max_attempts),
                          before=_log_before_retry, after=_log_after_retry)
async def wait_for_stable(self, release_name: str, arch: str,
                          release_stream: str):
    go_arch = util.go_arch_for_brew_arch(arch)
    release_controller_url = f"https://{go_arch}.ocp.releases.ci.openshift.org"
    if self.runtime.dry_run:
        actual_phase = await self.get_release_phase(
            release_controller_url, release_stream, release_name)
        self._logger.warning(
            "[DRY RUN] Release %s for %s has phase %s. Assume accepted.",
            release_name, arch, actual_phase)
        return
    return await retry(
        stop=stop_after_attempt(36),  # wait for 5m * 36 = 180m = 3 hours
        wait=wait_fixed(300),  # wait for 5 minutes between retries
        retry=(retry_if_result(lambda phase: phase != "Accepted")
               | retry_if_exception_type()),
        before_sleep=before_sleep_log(self._logger, logging.WARNING),
    )(self.get_release_phase)(release_controller_url, release_stream,
                              release_name)
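# Note on the bare retry_if_exception_type() above: with no argument it
# defaults to retrying on any Exception, so the combined predicate retries
# on both errors and not-yet-"Accepted" phases. A quick check:
import tenacity

assert tenacity.retry_if_exception_type().exception_types is Exception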
def wait_for(func, step=1, text=None, timeout=None, throw_exc=True, **kwargs):
    """Wrapper function to wait with timeout option.

    :param func: Function to evaluate.
    :param step: Time to sleep between attempts in seconds
    :param text: Text to print while waiting, for debug purposes
    :param timeout: Timeout in seconds
    :param throw_exc: Raise exception if timeout expired, but func result
        is not True
    :param kwargs: Keyword arguments to func
    :return: Return value of func.
    """
    if not timeout:
        return forever_wait_for(func, step, text, **kwargs)

    res = None

    def retry_logger(retry_state):
        # pylint: disable=protected-access
        LOGGER.debug(
            'wait_for: Retrying %s: attempt %s ended with: %s',
            text or retry_state.fn.__name__,
            retry_state.attempt_number,
            retry_state.outcome._exception or retry_state.outcome._result,
        )

    try:
        retry = tenacity.Retrying(
            reraise=throw_exc,
            stop=tenacity.stop_after_delay(timeout),
            wait=tenacity.wait_fixed(step),
            before_sleep=retry_logger,
            retry=(tenacity.retry_if_result(lambda value: not value) |
                   tenacity.retry_if_exception_type()))
        res = retry(func, **kwargs)
    except Exception as ex:  # pylint: disable=broad-except
        err = f"Wait for: {text or func.__name__}: timeout - {timeout} seconds - expired"
        LOGGER.error(err)
        if hasattr(ex, 'last_attempt') and ex.last_attempt.exception() is not None:  # pylint: disable=no-member
            LOGGER.error("last error: %r", ex.last_attempt.exception())  # pylint: disable=no-member
        else:
            LOGGER.error("last error: %r", ex)
        if throw_exc:
            if hasattr(ex, 'last_attempt') and not ex.last_attempt._result:  # pylint: disable=protected-access,no-member
                raise tenacity.RetryError(err) from ex
            raise

    return res
def lookup_node(self, hardware_info, timeout, starting_interval,
                node_uuid=None, max_interval=30):
    retry = tenacity.retry(
        retry=tenacity.retry_if_result(lambda r: r is False),
        stop=tenacity.stop_after_delay(timeout),
        wait=tenacity.wait_random_exponential(min=starting_interval,
                                              max=max_interval),
        reraise=True)
    try:
        return retry(self._do_lookup)(hardware_info=hardware_info,
                                      node_uuid=node_uuid)
    except tenacity.RetryError:
        raise errors.LookupNodeError('Could not look up node info. Check '
                                     'logs for details.')
def _put_object_safe(self, Bucket, Key, Body):
    put = self.s3.put_object(Bucket=Bucket, Key=Key, Body=Body)

    if self._consistency_stop:
        def _head():
            return self.s3.head_object(Bucket=Bucket, Key=Key,
                                       IfMatch=put['ETag'])

        tenacity.Retrying(
            retry=tenacity.retry_if_result(
                lambda r: r['ETag'] != put['ETag']),
            wait=self._consistency_wait,
            stop=self._consistency_stop)(_head)
def _put_object_safe(self, Bucket, Key, Body):
    put = self.s3.put_object(Bucket=Bucket, Key=Key, Body=Body)

    if self._consistency_stop:
        def _head():
            return self.s3.head_object(Bucket=Bucket, Key=Key,
                                       IfMatch=put['ETag'])

        tenacity.Retrying(retry=tenacity.retry_if_result(
            lambda r: r['ETag'] != put['ETag']),
            wait=self._consistency_wait,
            stop=self._consistency_stop)(_head)
def wait_for_active_server_channel(self):
    retryer = tenacity.Retrying(
        retry=(tenacity.retry_if_result(lambda r: r is None) |
               tenacity.retry_if_exception_type()),
        wait=tenacity.wait_exponential(min=10, max=25),
        stop=tenacity.stop_after_delay(60 * 3),
        reraise=True)
    logger.info(
        'Waiting for client %s to establish READY gRPC channel with %s',
        self.ip, self.server_target)
    channel = retryer(self.get_active_server_channel)
    logger.info(
        'gRPC channel between client %s and %s transitioned to READY:\n%s',
        self.ip, self.server_target, channel)
class BaseCharmOperationTest(test_utils.BaseCharmTest):
    """Base OVN Charm operation tests."""

    # override if not possible to determine release pair from charm
    # under test
    release_application = None

    @classmethod
    def setUpClass(cls):
        """Run class setup for OVN charm operation tests."""
        super(BaseCharmOperationTest, cls).setUpClass()
        cls.services = ['NotImplemented']  # This must be overridden
        cls.nrpe_checks = ['NotImplemented']  # This must be overridden
        cls.current_release = openstack_utils.get_os_release(
            openstack_utils.get_current_os_release_pair(
                cls.release_application or cls.application_name))

    @tenacity.retry(
        retry=tenacity.retry_if_result(lambda ret: ret is not None),
        # sleep for 2mins to allow 1min cron job to run...
        wait=tenacity.wait_fixed(120),
        stop=tenacity.stop_after_attempt(2))
    def _retry_check_commands_on_units(self, cmds, units):
        return generic_utils.check_commands_on_units(cmds, units)

    def test_pause_resume(self):
        """Run pause and resume tests.

        Pause service and check services are stopped, then resume and check
        they are started.
        """
        with self.pause_resume(self.services):
            logging.info('Testing pause resume (services="{}")'.format(
                self.services))

    def test_nrpe_configured(self):
        """Confirm that the NRPE service check files are created."""
        if self.current_release == openstack_utils.get_os_release(
                'jammy_yoga'):
            self.skipTest('The NRPE charm does not support Jammy yet')
        units = zaza.model.get_units(self.application_name)
        cmds = []
        for check_name in self.nrpe_checks:
            cmds.append('egrep -oh /usr/local.* /etc/nagios/nrpe.d/'
                        'check_{}.cfg'.format(check_name))
        ret = self._retry_check_commands_on_units(cmds, units)
        if ret:
            logging.info(ret)
        self.assertIsNone(ret, msg=ret)
def wait(self, timeout=60, wait_exp_multiplier=0.05, wait_exp_max=1.0):
    """Wait until the task is completed. Expires after 'timeout' seconds."""
    try:
        retryer = Retrying(
            wait=wait_random_exponential(multiplier=wait_exp_multiplier,
                                         max=wait_exp_max),
            stop=stop_after_delay(timeout),
            retry=retry_if_result(is_pending_status),
            before=before_log(logger, logging.DEBUG),
            after=after_log(logger, logging.DEBUG))
        retryer(self._refresh_status)
    except RetryError:
        raise TaskTimeout(self.data())
    if is_error_status(self['status']):
        raise TaskError(self.data())
    return self
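# Hedged sketch of the status predicates assumed by `wait` above; the real
# client defines its own status sets, so these values are illustrative.
PENDING_STATUSES = ('new', 'pending')  # illustrative set


def is_pending_status(status):
    return status in PENDING_STATUSES


def is_error_status(status):
    return status == 'error'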