def batch_wait(self, tasks, timeout=300, wait_exp_multiplier=0.05, wait_exp_max=1.0):
        """
        Wait until a list of tasks is completed. Expires after 'timeout' seconds.

        Returns a tuple of lists (pending_tasks, success_tasks, error_tasks).
        Each list contains pairs (original_position, task) sorted by original_position ascending;
        original_position is the index in the input tasks list, which preserves the original order.
        """
        try:
            positions = {}
            pending_tasks = []
            for pos, task in enumerate(tasks):
                positions[task.pk] = pos
                pending_tasks.append((pos, task))
            success_tasks = []
            error_tasks = []
            retryer = Retrying(wait=wait_random_exponential(multiplier=wait_exp_multiplier, max=wait_exp_max),
                               stop=stop_after_delay(timeout),
                               retry=retry_if_result(has_pending_tasks),
                               before=before_log(logger, logging.DEBUG),
                               after=after_log(logger, logging.DEBUG))
            retryer(self._refresh_tasks_status, pending_tasks, success_tasks, error_tasks, positions)
        except RetryError:
            pass

        return (sorted(pending_tasks, key=lambda v: v[0]),
                sorted(success_tasks, key=lambda v: v[0]),
                sorted(error_tasks, key=lambda v: v[0]))
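A minimal usage sketch for batch_wait (hypothetical names: client is an object exposing the method, and tasks holds Task objects carrying a pk attribute, as the docstring implies):

pending, success, error = client.batch_wait(tasks, timeout=120)
for original_position, task in error:
    # original_position is the index of the task in the input list
    logger.warning("task at index %d (pk=%s) failed", original_position, task.pk)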
Example 2
 def send_request(self, socket, request):
     @tenacity.retry(retry=tenacity.retry_if_exception_type(zmq.Again),
                     stop=tenacity.stop_after_delay(
                         self.conf.rpc_response_timeout))
     def send_retrying():
         self._do_send(socket, request)
     return send_retrying()
Example 3
def wait_for(func, timeout=constants.ux.TWO_MIN_USER_WAIT):
  """Waits for function to return truthy value."""
  def is_falsy(value):
    """Return whether value if falsy (None or False)."""
    return not value
  return tenacity.Retrying(
      stop=tenacity.stop_after_delay(timeout),
      retry=tenacity.retry_if_result(is_falsy))(func)
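A usage sketch under assumed names (find_element is hypothetical; any zero-argument callable returning a truthy value on success works):

element = wait_for(lambda: find_element("submit-button"))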
Example 4
 def wrapped(*args, **kwargs):
     self = args[0]
     new_fn = tenacity.retry(
         reraise=True,
         retry=tenacity.retry_if_result(_ofport_result_pending),
         wait=tenacity.wait_exponential(multiplier=0.01, max=1),
         stop=tenacity.stop_after_delay(
             self.vsctl_timeout))(fn)
     return new_fn(*args, **kwargs)
Example 5
    def send_request(self, socket, request):
        if hasattr(request, 'timeout'):
            _stop = tenacity.stop_after_delay(request.timeout)
        elif request.retry is not None and request.retry > 0:
            # no rpc_response_timeout option if notification
            _stop = tenacity.stop_after_attempt(request.retry)
        else:
            # well, now what?
            _stop = tenacity.stop_after_delay(60)

        @tenacity.retry(retry=tenacity.retry_if_exception_type(zmq.Again),
                        stop=_stop)
        def send_retrying():
            if request.msg_type in zmq_names.MULTISEND_TYPES:
                for _ in range(socket.connections_count()):
                    self.sender.send(socket, request)
            else:
                self.sender.send(socket, request)
        return send_retrying()
Example 6
File: s3.py Project: luo-zn/gnocchi
 def __init__(self, conf):
     super(S3Storage, self).__init__(conf)
     self.s3, self._region_name, self._bucket_prefix = (
         s3.get_connection(conf)
     )
     self._bucket_name = '%s-aggregates' % self._bucket_prefix
     if conf.s3_check_consistency_timeout > 0:
         self._consistency_stop = tenacity.stop_after_delay(
             conf.s3_check_consistency_timeout)
     else:
         self._consistency_stop = None
Example 7
 def __init__(self, retry_period, wrapped_connection):
     self.wrapped_connection = wrapped_connection
     self.retry_kwargs = {
         'retry': (
             retry_if_exception_type(redis.exceptions.ConnectionError) |
             retry_if_exception_type(redis.exceptions.TimeoutError)
         ),
         'reraise': True,
         'wait': wait_exponential(multiplier=1, max=self.RETRY_MAX_WAIT),
         'before_sleep': self._log_retry_attempt
     }
     if retry_period >= 0:
         self.retry_kwargs.update({'stop': stop_after_delay(retry_period)})
Example 8
class Auth0(object):
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)
        self.auth0 = self._authenticate()

    def _authenticate(self):
        get_token = GetToken(self.domain)
        token = get_token.client_credentials(self.clientid,
            self.clientsecret, 'https://{}/api/v2/'.format(self.domain))
        return a0(self.domain, token['access_token'])

    @tenacity.retry(wait=tenacity.wait_fixed(5), stop=tenacity.stop_after_delay(10), retry=tenacity.retry_if_exception_type(Auth0Error))
    def get_connections(self):
        return self.auth0.connections.all()

    @tenacity.retry(wait=tenacity.wait_fixed(5), stop=tenacity.stop_after_delay(10), retry=tenacity.retry_if_exception_type(Auth0Error))
    def get_connection(self, id):
        return self.auth0.connections.get(id)

    @tenacity.retry(wait=tenacity.wait_fixed(5), stop=tenacity.stop_after_delay(10), retry=tenacity.retry_if_exception_type(Auth0Error))
    def update_connection(self, id, body):
        # update a connection via the connections endpoint (not emails)
        return self.auth0.connections.update(id, body)
Example 9
    def test_stop_all(self):
        stop = tenacity.stop_all(tenacity.stop_after_delay(1),
                                 tenacity.stop_after_attempt(4))

        def s(*args):
            return stop(make_retry_state(*args))

        self.assertFalse(s(1, 0.1))
        self.assertFalse(s(2, 0.2))
        self.assertFalse(s(2, 0.8))
        self.assertFalse(s(4, 0.8))
        self.assertFalse(s(3, 1.8))
        self.assertTrue(s(4, 1.8))
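The test above exercises the AND semantics of tenacity.stop_all: retrying stops only once every condition holds, here at least 4 attempts and at least 1 second elapsed. The | operator seen in other examples is the OR counterpart (tenacity.stop_any), stopping as soon as either condition holds. A minimal sketch of the contrast:

# Stops only when BOTH conditions hold.
stop_and = tenacity.stop_all(tenacity.stop_after_delay(1),
                             tenacity.stop_after_attempt(4))
# Stops as soon as EITHER condition holds.
stop_or = tenacity.stop_after_delay(1) | tenacity.stop_after_attempt(4)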
Example 10
def wait_for(func, step=1, text=None, timeout=None, throw_exc=True, **kwargs):
    """
    Wrapper function to wait with timeout option.

    :param func: Function to evaluate.
    :param step: Time to sleep between attempts in seconds
    :param text: Text to print while waiting, for debug purposes
    :param timeout: Timeout in seconds
    :param throw_exc: Raise exception if timeout expired, but func result is not True
    :param kwargs: Keyword arguments to func
    :return: Return value of func.
    """
    if not timeout:
        return forever_wait_for(func, step, text, **kwargs)

    res = None

    def retry_logger(retry_state):
        # pylint: disable=protected-access
        LOGGER.debug(
            'wait_for: Retrying %s: attempt %s ended with: %s',
            text if text else retry_state.fn.__name__,
            retry_state.attempt_number,
            str(retry_state.outcome._exception)
            if retry_state.outcome._exception else retry_state.outcome._result)

    try:
        retry = tenacity.Retrying(
            reraise=throw_exc,
            stop=tenacity.stop_after_delay(timeout),
            wait=tenacity.wait_fixed(step),
            before_sleep=retry_logger,
            retry=(retry_if_result(lambda value: not value)
                   | retry_if_exception_type()))
        res = retry.call(func, **kwargs)

    except Exception as ex:  # pylint: disable=broad-except
        err = 'Wait for: {}: timeout - {} seconds - expired'.format(
            text if text else func.__name__, timeout)
        LOGGER.error(err)
        if hasattr(ex,
                   'last_attempt') and ex.last_attempt.exception() is not None:  # pylint: disable=no-member
            LOGGER.error("last error: %s", repr(ex.last_attempt.exception()))  # pylint: disable=no-member
        else:
            LOGGER.error("last error: %s", repr(ex))
        if throw_exc:
            if hasattr(ex, 'last_attempt') and not ex.last_attempt._result:  # pylint: disable=protected-access,no-member
                raise RetryError(err) from ex
            raise

    return res
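A usage sketch under assumed names (node_is_ready is a hypothetical predicate and my_node its keyword argument):

res = wait_for(node_is_ready, step=2, text='node readiness', timeout=60, node=my_node)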
Example 11
 def lookup_node(self, hardware_info, timeout, starting_interval,
                 node_uuid=None, max_interval=30):
     retry = tenacity.retry(
         retry=tenacity.retry_if_result(lambda r: r is False),
         stop=tenacity.stop_after_delay(timeout),
         wait=tenacity.wait_random_exponential(min=starting_interval,
                                               max=max_interval),
         reraise=True)
     try:
         return retry(self._do_lookup)(hardware_info=hardware_info,
                                       node_uuid=node_uuid)
     except tenacity.RetryError:
         raise errors.LookupNodeError('Could not look up node info. Check '
                                      'logs for details.')
Example 12
 def wait_for_active_server_channel(self):
     retryer = tenacity.Retrying(
         retry=(tenacity.retry_if_result(lambda r: r is None)
                | tenacity.retry_if_exception_type()),
         wait=tenacity.wait_exponential(min=10, max=25),
         stop=tenacity.stop_after_delay(60 * 3),
         reraise=True)
     logger.info(
         'Waiting for client %s to establish READY gRPC channel with %s',
         self.ip, self.server_target)
     channel = retryer(self.get_active_server_channel)
     logger.info(
         'gRPC channel between client %s and %s transitioned to READY:\n%s',
         self.ip, self.server_target, channel)
Example 13
class AdobeAnalyticsClient:
    """
    Create an Adobe Client for JWT Authentication.
    Doc: https://github.com/AdobeDocs/adobeio-auth/blob/stage/JWT/JWT.md
    Most of the code is taken from this repo:
    https://github.com/AdobeDocs/analytics-2.0-apis/tree/master/examples/jwt/python
    """

    def __init__(self, client_id, client_secret, tech_account_id, org_id, private_key):
        self.client_id = client_id
        self.client_secret = client_secret
        self.tech_account_id = tech_account_id
        self.org_id = org_id
        self.private_key = private_key

        # Creating jwt_token attribute
        logger.info("Getting jwt_token.")
        self.jwt_token = jwt.encode(
            {
                "exp": datetime.utcnow() + timedelta(seconds=30),
                "iss": self.org_id,
                "sub": self.tech_account_id,
                f"https://{IMS_HOST}/s/ent_analytics_bulk_ingest_sdk": True,
                "aud": f"https://{IMS_HOST}/c/{self.client_id}",
            },
            self.private_key,
            algorithm="RS256",
        )

        # Creating access_token attribute
        logger.info("Getting access_token.")
        self.access_token = self.get_access_token()

    @retry(wait=wait_exponential(multiplier=60, min=60, max=1200), stop=stop_after_delay(3600))
    def get_access_token(self):
        post_body = {"client_id": self.client_id, "client_secret": self.client_secret, "jwt_token": self.jwt_token}
        response = requests.post(IMS_EXCHANGE, data=post_body)
        return response.json()["access_token"]

    def build_request_headers(self, global_company_id):
        """
        Build request headers to be used to interact with Adobe Analytics APIs 2.0.
        """
        return {
            "Accept": "application/json",
            "Authorization": f"Bearer {self.access_token}",
            "Content-Type": "application/json",
            "x-api-key": self.client_id,
            "x-proxy-global-company-id": global_company_id,
        }
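A usage sketch with placeholder credentials (every value below is hypothetical):

client = AdobeAnalyticsClient(
    client_id="...",
    client_secret="...",
    tech_account_id="...",
    org_id="...",
    private_key=open("private.key").read(),
)
headers = client.build_request_headers("my_global_company_id")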
Example 14
    def acquire(self, timeout=10):
        """Acquire the lock.

        :param timeout: Maximum time to wait before returning. `None` means
                        forever, any value greater than or equal to 0 is
                        the number of seconds.
        :returns: True if the lock has been acquired, False otherwise.

        """
        stop = (tenacity.stop_never
                if timeout is None else tenacity.stop_after_delay(timeout))

        def wait(previous_attempt_number, delay_since_first_attempt):
            if timeout is None:
                remaining_timeout = None
            else:
                remaining_timeout = max(timeout - delay_since_first_attempt, 0)
            # TODO(jd): Wait for a DELETE event to happen: that'd mean the lock
            # has been released, rather than retrying on PUT events too
            try:
                self.etcd_client.watch_once(self.key, remaining_timeout)
            except exceptions.WatchTimedOut:
                pass
            return 0

        @tenacity.retry(retry=tenacity.retry_never, stop=stop, wait=wait)
        def _acquire():
            # TODO: save the created revision so we can check it later to make
            # sure we still have the lock

            self.lease = self.etcd_client.lease(self.ttl)

            success, _ = self.etcd_client.transaction(
                compare=[self.etcd_client.transactions.create(self.key) == 0],
                success=[
                    self.etcd_client.transactions.put(self.key,
                                                      self.uuid,
                                                      lease=self.lease)
                ],
                failure=[self.etcd_client.transactions.get(self.key)])
            if success is True:
                return True
            self.lease = None
            raise tenacity.TryAgain

        try:
            return _acquire()
        except tenacity.RetryError:
            return False
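A usage sketch for acquire (the enclosing etcd lock class is not shown above; lock, do_work and release are assumed names):

if lock.acquire(timeout=5):
    try:
        do_work()  # hypothetical critical section
    finally:
        lock.release()  # assumed counterpart, not shown above
else:
    print("could not acquire the lock within 5 seconds")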
Example 15
    def clobber(self):
        """Delete this AWS security group and associated resources"""
        if self.clobbered:
            return

        self.check_profile_and_region()

        # Get dependent EC2 instances
        response = clients['ec2'].describe_instances(
            Filters=[{
                'Name': 'vpc-id',
                'Values': [self.vpc_id]
            }])

        def has_security_group(instance, sg_id):
            return sg_id in [d['GroupId'] for d in instance['SecurityGroups']]

        deps = []
        for r in response.get('Reservations'):
            deps = deps + [
                i['InstanceId'] for i in r['Instances']
                if has_security_group(i, self.security_group_id)
            ]

        # Delete the dependent instances
        if deps:
            clients['ec2'].terminate_instances(InstanceIds=deps)
            mod_logger.warning(
                'Deleted dependent EC2 instances: {deps!s}'.format(deps=deps))

        # Delete the security group
        retry = tenacity.Retrying(wait=tenacity.wait_exponential(max=16),
                                  stop=tenacity.stop_after_delay(300),
                                  retry=tenacity.retry_if_exception_type(
                                      botocore.exceptions.ClientError))
        retry.call(clients['ec2'].delete_security_group,
                   GroupId=self.security_group_id)

        # Remove this VPC from the list of VPCs in the config file
        cloudknot.config.remove_resource(self._section_name,
                                         self.security_group_id)

        # Set the clobbered parameter to True,
        # preventing subsequent method calls
        self._clobbered = True

        mod_logger.info('Clobbered security group {id:s}'.format(
            id=self.security_group_id))
Example 16
class PoloniexBaseAPI(metaclass=ABCMeta):
    @abstractmethod
    def __init__(
        self,
        *,
        requests_per_second: int = REQUESTS_PER_SECOND,
    ) -> None:
        self._rate_limiter = RateLimiter(requests_per_second, 1, 1)
        self._requests_per_second = requests_per_second
        self._session = Session()

    @tenacity.retry(
        retry=tenacity.retry_if_exception(_retry_poloniex_error),
        wait=tenacity.wait_exponential() + tenacity.wait_random(0, 1),
        stop=tenacity.stop_after_attempt(4) | tenacity.stop_after_delay(8),
        reraise=True,
    )
    def request(
        self,
        *args,
        **kwargs,
    ) -> Optional[Union[Dict[str, Any], List[Dict[str, Any]]]]:
        cls_name = type(self).__name__
        while self._rate_limiter.check(cls_name) is False:
            sleep(1 / self._requests_per_second)

        request = Request(*args, **kwargs)
        prepared = self._session.prepare_request(request)

        response = self._session.send(prepared)

        status = response.status_code
        if status >= 200 and status < 300:
            data = None
            try:
                data = response.json()
            except ValueError:
                logger.exception('Error decoding response JSON')
            return data
        elif status >= 400 and status < 500:
            raise PoloniexRequestError(response)
        elif status >= 500:
            raise PoloniexServerError(response)
        else:
            # We shouldn't ever get 1xx responses (Poloniex doesn't send them)
            # or 3xx responses (requests follows redirects); return None to
            # make mypy happy
            return None
Example 17
def wait_for_task(master, name, state, delay=1):
    """
    Wait for a task with a certain name to be in a given state.
    """
    @retry(wait=wait_fixed(0.2), stop=stop_after_delay(delay))
    def _wait_for_task():
        tasks = http.get_json(master.addr, None, "tasks")["tasks"]
        for task in tasks:
            if task["name"] == name and task["state"] == state:
                return task
        raise Exception()

    try:
        return _wait_for_task()
    except Exception:
        raise CLIException("Timeout waiting for task expired")
Example 18
    def save_configuration(self, net_connect, cmd='copy running-config startup-config',):

        retry_kwargs = {'wait': tenacity.wait_random(min=2, max=6),
                        'reraise': False,
                        'stop': tenacity.stop_after_delay(30)}

        @tenacity.retry(**retry_kwargs)
        def _save():
            try:
                output = super(CiscoNxosSSH, net_connect).save_config()
                self._check_output(output, cmd)
            except Exception:
                raise
            return output

        return _save()
Example 19
def running_tasks(master):
    """
    Open the master's `/tasks` endpoint and read the task information.
    Retry for up to 1 second before giving up.
    """
    @retry(stop=stop_after_delay(1))
    def _running_tasks():
        tasks = http.get_json(master.addr, "tasks")["tasks"]
        if tasks[0]["state"] == "TASK_RUNNING":
            return tasks
        raise Exception()

    try:
        return _running_tasks()
    except Exception:
        return []
Example 20
def checkout_gethardinfo(task, ip, timeout=TIMEOUT):
    @tenacity.retry(wait=tenacity.wait_fixed(2),
                    stop=tenacity.stop_after_delay(timeout))
    def _checkout():
        with Database_test() as data_t:
            res = data_t.select_info('dhcpinfo', '*', ip)
            if res:
                return res
            else:
                # a bare `raise` needs an active exception; raise explicitly
                # so tenacity has a failure to retry on
                raise Exception("dhcp info for %s not found yet" % ip)

    res = _checkout()
    if res[0] == ip:
        return res[1]
    else:
        raise Exception("%s get dhcp ip failed")
Example 21
def wait_for_task(master, name, state, delay=1):
    """
    Wait for a task with a certain name to be in a given state.
    """
    @retry(wait=wait_fixed(0.2), stop=stop_after_delay(delay))
    def _wait_for_task():
        tasks = http.get_json(master.addr, "tasks")["tasks"]
        for task in tasks:
            if task["name"] == name and task["state"] == state:
                return task
        raise Exception()

    try:
        return _wait_for_task()
    except Exception:
        raise CLIException("Timeout waiting for task expired")
Example 22
    def run_execution(self,
                      execution_id,
                      function_id,
                      version,
                      rlimit=None,
                      input=None,
                      identifier=None,
                      service_url=None,
                      entry='main.main',
                      trust_id=None):
        """Run execution.

        Return a tuple including the result and the output.
        """
        if service_url:
            func_url = '%s/execute' % service_url
            data = utils.get_request_data(self.conf, function_id, version,
                                          execution_id, rlimit, input, entry,
                                          trust_id, self.qinling_endpoint)
            LOG.debug('Invoke function %s(version %s), url: %s, data: %s',
                      function_id, version, func_url, data)

            return utils.url_request(self.session, func_url, body=data)
        else:
            # Wait for image type function execution to be finished
            def _wait_complete():
                pod = self.v1.read_namespaced_pod(
                    identifier, self.conf.kubernetes.namespace)
                status = pod.status.phase
                return status == 'Succeeded'

            try:
                r = tenacity.Retrying(wait=tenacity.wait_fixed(1),
                                      stop=tenacity.stop_after_delay(180),
                                      retry=tenacity.retry_if_result(
                                          lambda result: result is False))
                r.call(_wait_complete)
            except Exception as e:
                LOG.exception("Failed to get pod output, pod: %s, error: %s",
                              identifier, str(e))
                return False, {'error': 'Function execution failed.'}

            output = self.v1.read_namespaced_pod_log(
                identifier,
                self.conf.kubernetes.namespace,
            )
            return True, output
Example 23
 def connect_agents_to_controlplane_subnet(self):
     self.logging.info("Connecting agents VMs to the control-plane subnet")
     control_plane_subnet = utils.retry_on_error()(
         self.network_client.subnets.get)(
             self.cluster_name, "{}-vnet".format(self.cluster_name),
             "{}-controlplane-subnet".format(self.cluster_name))
     subnet_id = control_plane_subnet.id
     for vm in self._get_agents_vms():
         self.logging.info("Connecting VM {}".format(vm.name))
         nic_id = vm.network_profile.network_interfaces[0].id
         vm_nic = self._get_vm_nic(nic_id)
         nic_address = vm_nic.ip_configurations[0].private_ip_address
         route = self._get_vm_route(nic_address)
         self.logging.info("Shutting down VM")
         utils.retry_on_error()(
             self.compute_client.virtual_machines.begin_deallocate)(
                 self.cluster_name, vm.name).wait()
         self.logging.info("Updating VM NIC subnet")
         nic_parameters = vm_nic.as_dict()
         nic_model = net_models.NetworkInterface(**nic_parameters)
         nic_model.ip_configurations[0]['subnet']['id'] = subnet_id
         utils.retry_on_error()(
             self.network_client.network_interfaces.begin_create_or_update)(
                 self.cluster_name, vm_nic.name, nic_model).wait()
         self.logging.info("Starting VM")
         utils.retry_on_error()(
             self.compute_client.virtual_machines.begin_start)(
                 self.cluster_name, vm.name).wait()
         self.logging.info("Updating the node routetable")
         route_params = route.as_dict()
         vm_nic = self._get_vm_nic(nic_id)  # Refresh NIC info
         nic_address = vm_nic.ip_configurations[0].private_ip_address
         route_params["next_hop_ip_address"] = nic_address
         utils.retry_on_error()(
             self.network_client.routes.begin_create_or_update)(
                 self.cluster_name,
                 "{}-node-routetable".format(self.cluster_name), route.name,
                 route_params).wait()
         self.logging.info(
             "Waiting until VM address is refreshed in the CAPZ cluster")
         for attempt in Retrying(stop=stop_after_delay(10 * 60),
                                 wait=wait_exponential(max=30),
                                 reraise=True):
             with attempt:
                 addresses = self._get_agents_private_addresses("windows")
                 assert nic_address in addresses
Example 24
 def decorator_f(self, *args, **kwargs):
     retry_args = getattr(self, 'retry_args', None)
     if retry_args is None:
         return fun(self, *args, **kwargs)
     multiplier = retry_args.get('multiplier', 1)
     min_limit = retry_args.get('min', 1)
     max_limit = retry_args.get('max', 1)
     stop_after_delay = retry_args.get('stop_after_delay', 10)
     tenacity_logger = tenacity.before_log(self.log, logging.DEBUG) if self.log else None
     default_kwargs = {
         'wait': tenacity.wait_exponential(multiplier=multiplier, max=max_limit, min=min_limit),
         'retry': tenacity.retry_if_exception(should_retry),
         'stop': tenacity.stop_after_delay(stop_after_delay),
         'before': tenacity_logger,
         'after': tenacity_logger,
     }
     return tenacity.retry(**default_kwargs)(fun)(self, *args, **kwargs)
Example 25
def block_until_done(client, bq_job):
    def _is_running(job_id):
        # True while the job is still in flight.
        return client.get_job(job_id).state in ["PENDING", "RUNNING"]

    # Retry while the job is still running; without a result-based retry
    # condition the decorated call would return after a single check.
    # retry_if_result and RetryError come from tenacity, like the helpers above.
    @retry(wait=wait_fixed(10), stop=stop_after_delay(1800),
           retry=retry_if_result(lambda running: running))
    def _wait_until_done(job_id):
        return _is_running(job_id)

    job_id = bq_job.job_id
    try:
        _wait_until_done(job_id=job_id)
    except RetryError:
        # the 1800 s stop expired while the job was still running
        pass

    if _is_running(job_id):
        client.cancel_job(job_id)
        raise BigQueryJobCancelled(job_id=job_id)

    if bq_job.exception():
        raise bq_job.exception()
Example 26
 def _create_resource_group(self):
     self.logging.info("Creating Azure resource group")
     resource_group_params = {
         'location': self.azure_location,
         'tags': self.resource_group_tags,
     }
     self.resource_mgmt_client.resource_groups.create_or_update(
         self.cluster_name, resource_group_params)
     for attempt in Retrying(stop=stop_after_delay(600),
                             wait=wait_exponential(max=30),
                             retry=retry_if_exception_type(AssertionError),
                             reraise=True):
         with attempt:
             rg = utils.retry_on_error()(
                 self.resource_mgmt_client.resource_groups.get)(
                     self.cluster_name)
             assert rg.properties.provisioning_state == "Succeeded"
Example 27
 def _wait_for_connection(self, node_addresses, timeout=600):
     self.logging.info(
         "Waiting up to %.2f minutes for nodes %s connectivity",
         timeout / 60.0, node_addresses)
     for attempt in Retrying(stop=stop_after_delay(timeout),
                             wait=wait_exponential(max=30),
                             retry=retry_if_exception_type(AssertionError),
                             reraise=True):
         with attempt:
             all_ready = True
             for addr in node_addresses:
                 is_ready = self.deployer.check_k8s_node_connection(addr)
                 if not is_ready:
                     self.logging.warning("Node %s is not up yet", addr)
                     all_ready = False
             assert all_ready
         self.logging.info("All the nodes are up")
Example 28
def cli_runner(project_dir: Path) -> CLIRunner:
    @retry(stop=stop_after_attempt(5) | stop_after_delay(5 * 10))
    def _run_cli(args: List[str],
                 enable_retry: bool = False) -> "CompletedProcess[str]":
        proc = subprocess.run(
            args,
            check=enable_retry,
            capture_output=True,
            text=True,
        )
        if proc.returncode:
            logger.warning(f"Got '{proc.returncode}' for '{' '.join(args)}'")
        logger.warning(proc.stderr)
        logger.info(proc.stdout)
        return proc

    return _run_cli
Example 29
def pg_connection(postgres_dsn: str):
    @tenacity.retry(
        stop=tenacity.stop_after_delay(30),
        wait=tenacity.wait_fixed(3),
        retry=tenacity.retry_if_exception_type(
            (
                psycopg2.Error,
                psycopg2.DatabaseError,
                psycopg2.OperationalError,
            )
        ),
    )
    def connect():
        return psycopg2.connect(dsn=postgres_dsn)

    connection = connect()
    return connection
Example 30
class BaseTopic:
    topic: str
    fail_silently: bool

    class Schema(BaseModel):
        pass

    def __init__(self):
        self.client: MQClient = MQClient(
            settings.MQ_HOST,
            settings.FIN_MQ_ACCESS_ID,
            settings.FIN_MQ_ACCESS_KEY,
            logger=logger,
        )
        self.producer = self.get_producer()

    def get_producer(self) -> Union[MQProducer, MQTransProducer]:
        raise NotImplementedError()

    @retry(reraise=True, stop=(stop_after_delay(5) | stop_after_attempt(3)))
    def _publish(self, message: TopicMessage) -> TopicMessage:
        return self.producer.publish_message(message)

    def make_message(self, body: Dict) -> TopicMessage:
        schema = self.Schema.parse_obj(body)
        return TopicMessage(schema.json())

    def publish(self, body: Dict, *, tag: str = None) -> TopicMessage:
        try:
            message = self.make_message(body)
            if tag is not None:
                tag = tag.lower()
                message.set_message_tag(tag)

            return self._publish(message)
        except Exception as e:
            if not self.fail_silently:
                raise

            logger.error(
                "PublishMessageFailed\ntopic: {}\nmsg: {}\ntag: {}\nerror: {}",
                self.topic,
                body,
                tag,
                repr(e),
            )
Example 31
class RuntimesTest(base.BaseQinlingTest):
    name_prefix = 'RuntimesTest'

    @tenacity.retry(wait=tenacity.wait_fixed(2),
                    stop=tenacity.stop_after_delay(10),
                    retry=tenacity.retry_if_exception_type(AssertionError))
    def _await_runtime_available(self, id):
        resp, body = self.qinling_client.get_obj('runtimes', id)

        self.assertEqual(200, resp.status)
        self.assertEqual('available', body['status'])

    @decorators.idempotent_id('fdc2f07f-dd1d-4981-86d3-5bc7908d9a9b')
    def test_create_list_get_delete_runtime(self):
        name = data_utils.rand_name('runtime', prefix=self.name_prefix)

        req_body = {'name': name, 'image': 'openstackqinling/python-runtime'}
        resp, body = self.qinling_client.post_json('runtimes', req_body)
        runtime_id = body['id']

        self.assertEqual(201, resp.status)
        self.assertEqual(name, body['name'])

        # Get runtimes
        resp, body = self.qinling_client.get_list_objs('runtimes')

        self.assertEqual(200, resp.status)
        self.assertIn(runtime_id,
                      [runtime['id'] for runtime in body['runtimes']])

        # Wait for runtime to be available
        self._await_runtime_available(runtime_id)

        # Check k8s resource
        deploy = self.k8s_v1extention.read_namespaced_deployment(
            runtime_id, namespace=self.namespace)

        self.assertEqual(runtime_id, deploy.metadata.name)
        self.assertEqual(deploy.status.replicas,
                         deploy.status.available_replicas)

        # Delete runtime
        resp, _ = self.qinling_client.delete_obj('runtimes', runtime_id)

        self.assertEqual(204, resp.status)
Example 32
    def wait(self, timeout=60, wait_exp_multiplier=0.05, wait_exp_max=1.0):
        """
        Wait until task is completed. Expires after 'timeout' seconds.
        """
        try:
            retryer = Retrying(wait=wait_random_exponential(multiplier=wait_exp_multiplier, max=wait_exp_max),
                               stop=stop_after_delay(timeout),
                               retry=retry_if_result(is_pending_status),
                               before=before_log(logger, logging.DEBUG),
                               after=after_log(logger, logging.DEBUG))
            retryer(self._refresh_status)
        except RetryError:
            raise TaskTimeout(self.data())

        if is_error_status(self['status']):
            raise TaskError(self.data())

        return self
Example 33
    def __getattr__(self, item):
        method = getattr(self.wrapped_connection, item)

        # we don't want to deal with attributes or properties
        if not callable(method):
            return method

        @retry(
            retry=(retry_if_exception_type(redis.exceptions.ConnectionError)
                   | retry_if_exception_type(redis.exceptions.TimeoutError)),
            reraise=True,
            stop=(stop_after_delay(self.retry_period)),
            wait=wait_fixed(min(self.retry_period // 2, self.RETRY_MAX_WAIT)))
        def retrier(*args, **kwargs):
            method = getattr(self.wrapped_connection, item)
            return method(*args, **kwargs)

        return retrier
Example 34
def get_schema_helper(connection, schema_name, retry=True):
    try:
        return _get_schema_helper(connection, schema_name)
    except Exception:
        with excutils.save_and_reraise_exception(reraise=False) as ctx:
            if not retry:
                ctx.reraise = True
            # We may have failed due to set-manager not being called
            helpers.enable_connection_uri(connection)

            # There is a small window for a race, so retry up to a second
            @tenacity.retry(wait=tenacity.wait_exponential(multiplier=0.01),
                            stop=tenacity.stop_after_delay(1),
                            reraise=True)
            def do_get_schema_helper():
                return _get_schema_helper(connection, schema_name)

            return do_get_schema_helper()
Example 35
 def get_retry_logic(retry_logic_rules: Optional[Dict]):
     retry_logic_rules = retry_logic_rules or {}
     attempts_no = retry_logic_rules.get("attempts", 3)
     wait_s = retry_logic_rules.get("wait_s", 5)
     retry_logic = {
         "reraise": True,
         "stop": (stop_after_attempt(attempts_no) | stop_after_delay(55)),
         "wait": wait_fixed(wait_s),
         "retry": (retry_if_exception_type(requests.exceptions.ProxyError)
                   | retry_if_exception_type(requests.exceptions.ConnectionError)
                   | retry_if_exception_type(urllib3.exceptions.NewConnectionError)
                   | retry_if_exception_type(urllib3.exceptions.ConnectTimeoutError)),
     }
     return retry_logic
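A usage sketch showing how the returned kwargs plug into tenacity's Retrying runner (assumes Retrying is imported from tenacity alongside the helpers above; the URL is a placeholder):

for attempt in Retrying(**get_retry_logic({"attempts": 5, "wait_s": 2})):
    with attempt:
        response = requests.get("https://example.com/api", timeout=10)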
Example 37
    def start(self):
        pki_done = False
        for ovsdb_process in self.ovsdb_server_processes:
            # create the db from the schema using ovsdb-tool
            ovsdb_tool_cmd = [
                spawn.find_executable('ovsdb-tool'), 'create',
                ovsdb_process['db_path'], ovsdb_process['schema_path']
            ]
            utils.execute(ovsdb_tool_cmd)

            # start the ovsdb-server
            ovsdb_server_cmd = [
                spawn.find_executable('ovsdb-server'), '-vconsole:off',
                '--log-file=%s' % (ovsdb_process['log_file_path']),
                '--remote=punix:%s' % (ovsdb_process['remote_path']),
                '--unixctl=%s' % (ovsdb_process['unixctl_path'])
            ]
            if ovsdb_process['protocol'] != 'unix':
                ovsdb_server_cmd.append(
                    '--remote=p%s:0:%s' %
                    (ovsdb_process['protocol'], ovsdb_process['remote_ip']))
            if ovsdb_process['protocol'] == 'ssl':
                if not pki_done:
                    pki_done = True
                    self._init_ovsdb_pki()
                ovsdb_server_cmd.append('--private-key=%s' % self.private_key)
                ovsdb_server_cmd.append('--certificate=%s' % self.certificate)
                ovsdb_server_cmd.append('--ca-cert=%s' % self.ca_cert)
            ovsdb_server_cmd.append(ovsdb_process['db_path'])
            obj, _ = utils.create_process(ovsdb_server_cmd)

            @tenacity.retry(wait=tenacity.wait_exponential(multiplier=0.1),
                            stop=tenacity.stop_after_delay(10),
                            reraise=True)
            def get_ovsdb_remote_port_retry(pid):
                process = psutil.Process(pid)
                for connect in process.connections():
                    if connect.status == 'LISTEN':
                        return connect.laddr[1]
                raise Exception(_("Could not find LISTEN port."))

            if ovsdb_process['protocol'] != 'unix':
                ovsdb_process['remote_port'] = \
                    get_ovsdb_remote_port_retry(obj.pid)
Example 38
def block_until_done(
    client: Client,
    bq_job: Union[bigquery.job.query.QueryJob, bigquery.job.load.LoadJob],
    timeout: int = 1800,
    retry_cadence: float = 1,
):
    """
    Waits for bq_job to finish running, up to a maximum amount of time specified by the timeout parameter (defaulting to 30 minutes).

    Args:
        client: A bigquery.client.Client to monitor the bq_job.
        bq_job: The bigquery job (QueryJob or LoadJob) to wait on until it finishes running.
        timeout: An optional number of seconds for setting the time limit of the job.
        retry_cadence: An optional number of seconds for setting how often the job should be checked for completion.

    Raises:
        BigQueryJobStillRunning exception if the function has blocked longer than 30 minutes.
        BigQueryJobCancelled exception to signify that the job has been cancelled (i.e. from timeout or KeyboardInterrupt).
    """

    # For test environments, retry more aggressively
    if flags_helper.is_test():
        retry_cadence = 0.1

    def _wait_until_done(bq_job):
        if client.get_job(bq_job).state in ["PENDING", "RUNNING"]:
            raise BigQueryJobStillRunning(job_id=bq_job.job_id)

    try:
        retryer = Retrying(
            wait=wait_fixed(retry_cadence),
            stop=stop_after_delay(timeout),
            retry=retry_if_exception_type(BigQueryJobStillRunning),
            reraise=True,
        )
        retryer(_wait_until_done, bq_job)

    finally:
        if client.get_job(bq_job).state in ["PENDING", "RUNNING"]:
            client.cancel_job(bq_job)
            raise BigQueryJobCancelled(job_id=bq_job.job_id)

        if bq_job.exception():
            raise bq_job.exception()
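A usage sketch, assuming client is the bigquery.client.Client from the signature above (the query text is a placeholder):

job = client.query("SELECT 1")
block_until_done(client, job, timeout=600)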
Example 39
    def _wait_for_reboot(self, task, timeout):
        wait = CONF.agent.post_deploy_get_power_state_retry_interval
        if not timeout:
            timeout = CONF.agent.post_deploy_get_power_state_retries * wait

        @tenacity.retry(
            stop=tenacity.stop_after_delay(timeout),
            retry=(tenacity.retry_if_result(lambda result: not result)
                   | tenacity.retry_if_exception_type(
                exception.AgentConnectionFailed)),
            wait=tenacity.wait_fixed(wait),
            reraise=True)
        def _wait_until_rebooted(task):
            try:
                status = self._client.get_commands_status(
                    task.node, retry_connection=False, expect_errors=True)
            except exception.AgentConnectionFailed:
                LOG.debug('Still waiting for the agent to come back on the '
                          'node %s', task.node.uuid)
                raise

            if any(cmd['command_name'] == agent_client.REBOOT_COMMAND
                   for cmd in status):
                LOG.debug('Still waiting for the agent to power off on the '
                          'node %s', task.node.uuid)
                return False

            return True

        try:
            _wait_until_rebooted(task)
        except exception.AgentConnectionFailed as exc:
            msg = _('Agent failed to come back on %(node)s with the "agent" '
                    'power interface: %(exc)s') % {
                        'node': task.node.uuid, 'exc': exc}
            LOG.error(msg)
            raise exception.PowerStateFailure(msg)
        except Exception as exc:
            LOG.error('Could not reboot node %(node)s with the "agent" power '
                      'interface: %(exc)s',
                      {'node': task.node.uuid, 'exc': exc})
            raise exception.PowerStateFailure(
                _('Unexpected error when rebooting through the agent: %s')
                % exc)
Example 40
def idl_factory():
    conn = cfg.CONF.OVS.ovsdb_connection
    schema_name = 'Open_vSwitch'
    try:
        helper = idlutils.get_schema_helper(conn, schema_name)
    except Exception:
        helpers.enable_connection_uri(conn)

        @tenacity.retry(wait=tenacity.wait_exponential(multiplier=0.01),
                        stop=tenacity.stop_after_delay(1),
                        reraise=True)
        def do_get_schema_helper():
            return idlutils.get_schema_helper(conn, schema_name)

        helper = do_get_schema_helper()

    # TODO(twilson) We should still select only the tables/columns we use
    helper.register_all()
    return idl.Idl(conn, helper)
Example 41
    def get_schema_helper(self):
        """Retrieve the schema helper object from OVSDB"""
        try:
            helper = idlutils.get_schema_helper(self.connection,
                                                self.schema_name)
        except Exception:
            # We may have failed due to set-manager not being called
            helpers.enable_connection_uri(self.connection)

            # There is a small window for a race, so retry up to a second
            @tenacity.retry(wait=tenacity.wait_exponential(multiplier=0.01),
                            stop=tenacity.stop_after_delay(1),
                            reraise=True)
            def do_get_schema_helper():
                return idlutils.get_schema_helper(self.connection,
                                                  self.schema_name)
            helper = do_get_schema_helper()

        return helper
Example 42
    def get_schema_helper(self):
        """Retrieve the schema helper object from OVSDB"""
        # The implementation of this function is same as the base class method
        # without the enable_connection_uri() called (since ovs-vsctl won't
        # exist on the controller node when using the reference architecture).
        try:
            helper = idlutils.get_schema_helper(self.connection,
                                                self.schema_name)
        except Exception:
            # There is a small window for a race, so retry up to a second
            @tenacity.retry(wait=tenacity.wait_exponential(multiplier=0.01),
                            stop=tenacity.stop_after_delay(1),
                            reraise=True)
            def do_get_schema_helper():
                return idlutils.get_schema_helper(self.connection,
                                                  self.schema_name)
            helper = do_get_schema_helper()

        return helper
Example 43
    def post_write(self, db="influxdb"):

        creator = pecan.request.auth_helper.get_current_user(pecan.request)
        tag_to_rid = pecan.request.headers.get(
            "X-Gnocchi-InfluxDB-Tag-Resource-ID",
            self.DEFAULT_TAG_RESOURCE_ID)

        while True:
            encoding, chunk = self._write_get_lines()

            # If chunk is empty then this is over.
            if not chunk:
                break

            # Compute now on a per-chunk basis
            now = numpy.datetime64(int(time.time() * 10e8), 'ns')

            # resources = { resource_id: {
            #     metric_name: [ incoming.Measure(t, v), …], …
            #   }, …
            # }
            resources = collections.defaultdict(
                lambda: collections.defaultdict(list))
            for line_number, line in enumerate(chunk.split(b"\n")):
                # Ignore empty lines
                if not line:
                    continue

                try:
                    measurement, tags, fields, timestamp = (
                        line_protocol.parseString(line.decode())
                    )
                except (UnicodeDecodeError, SyntaxError,
                        pyparsing.ParseException):
                    api.abort(400, {
                        "cause": "Value error",
                        "detail": "line",
                        "reason": "Unable to parse line %d" % (
                            line_number + 1),
                    })

                if timestamp is None:
                    timestamp = now

                try:
                    resource_id = tags.pop(tag_to_rid)
                except KeyError:
                    api.abort(400, {
                        "cause": "Value error",
                        "detail": "key",
                        "reason": "Unable to find key `%s' in tags" % (
                            tag_to_rid),
                    })

                tags_str = (("@" if tags else "") +
                            ",".join(("%s=%s" % (k, tags[k]))
                                     for k in sorted(tags)))

                for field_name, field_value in six.iteritems(fields):
                    if isinstance(field_value, str):
                        # We do not support field value that are not numerical
                        continue

                    # Metric name is the:
                    # <measurement>.<field_key>@<tag_key>=<tag_value>,…
                    # with tag ordered
                    # Replace "/" with "_" because Gnocchi does not support /
                    # in metric names
                    metric_name = (
                        measurement + "." + field_name + tags_str
                    ).replace("/", "_")

                    resources[resource_id][metric_name].append(
                        incoming.Measure(timestamp, field_value))

            measures_to_batch = {}
            for resource_name, metrics_and_measures in six.iteritems(
                    resources):
                resource_id = utils.ResourceUUID(
                    resource_name, creator=creator)
                LOG.debug("Getting metrics from resource `%s'", resource_name)
                timeout = pecan.request.conf.api.operation_timeout
                metrics = (
                    api.get_or_create_resource_and_metrics.retry_with(
                        stop=tenacity.stop_after_delay(timeout))(
                            creator, resource_id, resource_name,
                            metrics_and_measures.keys(),
                            {}, db)
                )

                for metric in metrics:
                    api.enforce("post measures", metric)

                measures_to_batch.update(
                    dict((metric.id, metrics_and_measures[metric.name])
                         for metric in metrics
                         if metric.name in metrics_and_measures))

            LOG.debug("Add measures batch for %d metrics",
                      len(measures_to_batch))
            pecan.request.incoming.add_measures_batch(measures_to_batch)
            pecan.response.status = 204

            if encoding != "chunked":
                return
Example 44
    def process_data(self):
        stations = {}
        try:
            self.log.info('Processing FFVL data...')

            result = requests.get(
                'http://data.ffvl.fr/json/balises.json', timeout=(self.connect_timeout, self.read_timeout))
            ffvl_stations = result.json()

            for ffvl_station in ffvl_stations:
                ffvl_id = None
                try:
                    ffvl_id = ffvl_station['idBalise']
                    station = self.save_station(
                        ffvl_id,
                        ffvl_station['nom'],
                        ffvl_station['nom'],
                        ffvl_station['latitude'],
                        ffvl_station['longitude'],
                        Status.GREEN,
                        altitude=ffvl_station['altitude'],
                        url=ffvl_station['url'])
                    stations[station['_id']] = station

                except ProviderException as e:
                    self.log.warn(f"Error while processing station '{ffvl_id}': {e}")
                except Exception as e:
                    self.log.exception(f"Error while processing station '{ffvl_id}': {e}")

        except ProviderException as e:
            self.log.warn(f'Error while processing stations: {e}')
        except Exception as e:
            self.log.exception(f'Error while processing stations: {e}')

        try:
            @retry(wait=wait_random(min=1, max=3), stop=(stop_after_delay(15)))
            def request_data():
                # data.ffvl.fr randomly returns an empty file instead of the json doc
                result = requests.get(
                    'http://data.ffvl.fr/json/relevesmeteo.json', timeout=(self.connect_timeout, self.read_timeout))
                return result.json()

            ffvl_measures = request_data()

            ffvl_tz = tz.gettz('Europe/Paris')
            for ffvl_measure in ffvl_measures:
                station_id = None
                try:
                    ffvl_id = ffvl_measure['idbalise']
                    station_id = self.get_station_id(ffvl_id)
                    if station_id not in stations:
                        raise ProviderException(f"Unknown station '{station_id}'")
                    station = stations[station_id]

                    measures_collection = self.measures_collection(station_id)
                    new_measures = []

                    key = arrow.get(ffvl_measure['date'], 'YYYY-MM-DD HH:mm:ss').replace(tzinfo=ffvl_tz).timestamp

                    if not self.has_measure(measures_collection, key):
                        measure = self.create_measure(
                            station,
                            key,
                            ffvl_measure['directVentMoy'],
                            ffvl_measure['vitesseVentMoy'],
                            ffvl_measure['vitesseVentMax'],
                            temperature=ffvl_measure['temperature'],
                            humidity=ffvl_measure['hydrometrie'],
                            pressure=Pressure(qfe=ffvl_measure['pression'], qnh=None, qff=None)
                        )
                        new_measures.append(measure)

                    self.insert_new_measures(measures_collection, station, new_measures)

                except ProviderException as e:
                    self.log.warn(f"Error while processing measures for station '{station_id}': {e}")
                except Exception as e:
                    self.log.exception(f"Error while processing measures for station '{station_id}': {e}")

        except ProviderException as e:
            self.log.warn(f'Error while processing FFVL: {e}')
        except Exception as e:
            self.log.exception(f'Error while processing FFVL: {e}')

        self.log.info('...Done!')
Example 45
import pytest
import requests
import tenacity


@pytest.fixture(scope='session')
def docker_services(docker_services):
    """Waits until the app in Docker becomes responsive.
    Overrides the fixture from pytest-docker.
    """
    waiter_port = docker_services.port_for('waiter', 8080)
    waiter_url = f'http://localhost:{waiter_port}'
    _wait_for_compose(waiter_url)
    return docker_services


@tenacity.retry(
    stop=tenacity.stop_after_delay(10),
    wait=tenacity.wait_fixed(0.1),
)
def _wait_for_compose(app_url: str):
    response = requests.get(app_url)
    response.raise_for_status()


@pytest.fixture(scope='session')
def app_url(docker_services):
    app_port = docker_services.port_for('api', 8080)
    return f'http://localhost:{app_port}'


def test_names_from_greetings_get_saved(app_url, docker_services):
    names = ['Wieńczysław', 'Spycigniew', 'Perystaltyka']
Example 46
def assert_wait(func, exc_type=AssertionError):
  """Waits for function to succeed (not raise `exc_type`)."""
  return tenacity.Retrying(
      stop=tenacity.stop_after_delay(constants.ux.MAX_USER_WAIT_SECONDS),
      retry=tenacity.retry_if_exception_type((exc_type, tenacity.TryAgain)))(
      func)