Example 1
    def request(self, method, url, **kwargs):
        """
        Wrapper method of ``requests.request`` adding retry, timeout and headers.

        If the actual request fails to connect or timed out, this client will retry the
        same request if ``retry`` is truthy after Arborist becomes healthy.
        By default, it will retry health check up to 5 times, waiting for a maximum of
        10 seconds, before giving up and declaring Arborist unavailable.

        :param expect_json: True (default) if the response should be in JSON format
        :param retry: True (default) if the request should be retried, or a dict as
                      keyword arguments for ``backoff.on_predicate``
        :param timeout: overwrite timeout parameter for ``requests``
        """
        expect_json = kwargs.pop("expect_json", True)
        kwargs = self._env.get_current_with(kwargs)
        retry = kwargs.pop("retry", True)
        authz_provider = kwargs.pop("authz_provider", self._authz_provider)

        kwargs.setdefault("timeout", self._timeout)
        if authz_provider:
            headers = kwargs.setdefault("headers", {})
            headers["X-AuthZ-Provider"] = authz_provider
        try:
            rv = requests.request(method, url, **kwargs)
        except (
            requests.exceptions.ConnectionError,
            requests.exceptions.ConnectTimeout,
        ):
            if retry:
                if isinstance(retry, bool):
                    retry = {}
                # set some defaults for when to give up: after 5 failures, or 10 seconds
                # (these can be overridden by keyword arguments)
                retry.setdefault("max_tries", 5)
                retry.setdefault("max_time", 10)

                def giveup(details):
                    # backoff passes a details dict to on_giveup handlers
                    raise ArboristUnhealthyError()

                def wait_gen():
                    # halve the Fibonacci wait times between retries so the
                    # schedule fits within the 10-second budget above
                    for n in backoff.fibo():
                        yield n / 2.0

                # block on the retried health check before re-issuing the request
                backoff.on_predicate(wait_gen, on_giveup=giveup, **retry)(self.healthy)()
                rv = requests.request(method, url, **kwargs)
            else:
                raise
        return ArboristResponse(rv, expect_json=expect_json)
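Note on the pattern above: ``backoff.on_predicate(...)(self.healthy)()`` builds a retrying wrapper around the health check and immediately invokes it, blocking until the service reports healthy or the retry budget runs out. A minimal self-contained sketch of the same idea (the ``service_healthy`` function and its flaky behavior are invented for illustration):

import backoff

_attempts = {"n": 0}

def service_healthy():
    # Pretend the service only becomes healthy on the third check.
    _attempts["n"] += 1
    return _attempts["n"] >= 3

def wait_gen():
    # Same trick as above: halve Fibonacci waits to fit a small time budget.
    for n in backoff.fibo():
        yield n / 2.0

# Retries while service_healthy() returns falsey; gives up after 5 tries or 10 s.
backoff.on_predicate(wait_gen, max_tries=5, max_time=10)(service_healthy)()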
Example 2
 def wrapper(self, *m_args, **m_kwargs):
     # Build the retrying health check, block on it, then call the wrapped method.
     do_backoff = backoff.on_predicate(wait_gen, *backoff_args,
                                       on_giveup=giveup,
                                       **backoff_kwargs)
     do_backoff(self.healthy)()
     return method(self, *m_args, **m_kwargs)
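Example 2 applies the same gate inside a method decorator, so every call first waits for ``self.healthy`` to return truthy. A hedged sketch of how such a decorator factory could be assembled (the name ``require_healthy`` and the demo class are illustrative, not from the original code):

import functools

import backoff

def require_healthy(wait_gen=backoff.expo, **backoff_kwargs):
    # Decorator factory: block on self.healthy() before running the method.
    def decorator(method):
        @functools.wraps(method)
        def wrapper(self, *m_args, **m_kwargs):
            backoff.on_predicate(wait_gen, **backoff_kwargs)(self.healthy)()
            return method(self, *m_args, **m_kwargs)
        return wrapper
    return decorator

class Client:
    def __init__(self):
        self._checks = 0

    def healthy(self):
        self._checks += 1
        return self._checks >= 2  # healthy on the second check

    @require_healthy(max_tries=5, max_time=10)
    def get_user(self):
        return {"id": 1}

print(Client().get_user())  # waits for healthy(), then runs the method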
Example 3
def decorate(func, args):
    interval = 10
    return backoff.on_predicate(
        backoff.constant,
        interval=interval,
        # integer division: max_tries must be a whole number of polls
        max_tries=args.max_wait * 60 // interval,
        jitter=lambda value: value,  # identity jitter keeps the constant schedule
    )(func)
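Example 3 converts a wall-clock budget (``args.max_wait`` minutes) into a retry count for a fixed 10-second polling interval; with jitter effectively disabled, the schedule is predictable. A small sketch of the same arithmetic (``poll_job`` is hypothetical; passing ``jitter=None`` is the modern equivalent of the identity lambda):

import backoff

def poll_job():
    # Hypothetical check; returns truthy once the job is done.
    return True

max_wait_minutes = 2
interval = 10  # seconds between polls
max_tries = max_wait_minutes * 60 // interval  # 12 polls over 2 minutes

backoff.on_predicate(
    backoff.constant,
    interval=interval,
    max_tries=max_tries,
    jitter=None,  # no jitter: keep polls exactly 10 s apart
)(poll_job)()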
Example 4
    def test_backoff_call(self):
        # pylint: disable=protected-access
        wait_gen, max_tries = jenkins._backoff_timeout(timeout=.36,
                                                       base=2,
                                                       factor=.0001)
        always_false = Mock(return_value=False)

        count_retries = backoff.on_predicate(
            wait_gen,
            max_tries=max_tries,
            on_backoff=print,
            jitter=None,
        )(always_false.__call__)

        count_retries()

        self.assertEqual(always_false.call_count, 13)
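The assertion holds because ``on_predicate`` with a result that stays falsey and ``jitter=None`` calls the target exactly ``max_tries`` times before giving up; the wait generator only controls how long the (tiny) sleeps are. A standalone illustration of the counting behavior, independent of ``jenkins._backoff_timeout``:

from unittest.mock import Mock

import backoff

always_false = Mock(return_value=False)

# A falsey result every time means the target is invoked exactly max_tries times.
backoff.on_predicate(
    backoff.expo,
    base=2,
    factor=0.0001,  # tiny waits keep the example fast
    max_tries=13,
    jitter=None,
)(always_false.__call__)()

assert always_false.call_count == 13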
Example 5
 def request(self,
             method,
             url,
             params=None,
             json=None,
             headers=None,
             user_agent=None):
     retry = backoff.on_predicate(
         backoff.expo,
         lambda response: response.status_code == HTTPStatus.TOO_MANY_REQUESTS,
         max_time=self.timeout,
         jitter=backoff.full_jitter,
     )
     response = retry(self._request)(method, url, params, json, headers,
                                     user_agent)
     return self._handle_response(response)
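Example 5 retries on rate limiting by inspecting the response rather than catching an exception: the predicate returns True for HTTP 429, which tells backoff to wait and call ``self._request`` again. A compact sketch with a stubbed request function (the stub is illustrative):

from http import HTTPStatus
from types import SimpleNamespace

import backoff

_codes = iter([429, 429, 200])

def fake_request():
    # Stub: rate-limited twice, then successful.
    return SimpleNamespace(status_code=next(_codes))

retry = backoff.on_predicate(
    backoff.expo,
    lambda response: response.status_code == HTTPStatus.TOO_MANY_REQUESTS,
    max_time=5,
    jitter=backoff.full_jitter,
)
response = retry(fake_request)()
print(response.status_code)  # 200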
Example 6
 def cleanup_for_destroy(self):
     try:
         self._v1_apps.delete_namespaced_stateful_set(
             "docker", KUBESYSTEM_NAMESPACE)
     except client.rest.ApiException as e:
         if e.status != http_client.NOT_FOUND:
             raise
     # Delete PVCs labeled for cleanup, then poll until their backing PVs are gone.
     selector = "orchestrate/cleanup-before-destroy"
     pvcs = self._v1_core.list_persistent_volume_claim_for_all_namespaces(
         label_selector=selector).items
     for pvc in pvcs:
         self._v1_core.delete_namespaced_persistent_volume_claim(
             pvc.metadata.name, pvc.metadata.namespace)
     # Retry the listing while any PVs remain, for at most 120 seconds.
     remaining_pvs = backoff.on_predicate(
         backoff.expo,
         lambda pvs: len(pvs.items) > 0,
         max_time=120,
     )(self._v1_core.list_persistent_volume)()
     if remaining_pvs.items:
         raise CleanupFailedException(
             "Some volumes could not be cleaned up, please remove them before destroying the cluster"
         )
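Example 6 points the predicate the other way: it stays truthy while persistent volumes remain, so backoff keeps re-listing until the list drains or 120 seconds elapse, and the last returned value is then inspected. A generic sketch of this poll-until-empty shape (the ``list_items`` stub stands in for the Kubernetes client call):

import backoff

_pending = [["vol-a", "vol-b"], ["vol-a"], []]

def list_items():
    # Stub for a listing call whose result shrinks as cleanup proceeds.
    return _pending.pop(0)

# Retry while items remain; on timeout the last (possibly non-empty) result is returned.
remaining = backoff.on_predicate(
    backoff.expo,
    lambda items: len(items) > 0,
    max_time=10,
)(list_items)()

if remaining:
    raise RuntimeError("items were not cleaned up in time")
print("cleanup complete")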
Example 7
    def request(self, method, url, data=None, params=None, headers=None, retry_count=0, resource_name=None,
                retry_when_empty_result=None):
        # Work on a copy so repeated calls don't consume keys from the shared strategy dict.
        backoff_strategy = copy.deepcopy(self.backoff_strategy)

        def http_status_codes_to_retry():
            retry_codes = backoff_strategy.pop(RETRY_HTTP_STATUS_CODES, [])
            retry_codes = list(map(int, retry_codes)) if isinstance(retry_codes, (list, tuple)) else [int(retry_codes)]
            # Always retry on 429 (rate limited), without duplicating it.
            if 429 not in retry_codes:
                retry_codes.append(429)
            return retry_codes

        retry_http_status_codes = http_status_codes_to_retry()

        def fatal_code(e):
            # Give up immediately on auth errors and on non-retryable 4xx responses.
            return isinstance(e, error.AuthorisationError) or (
                isinstance(e, error.HttpError)
                and 400 <= e.status_code < 500
                and e.status_code not in retry_http_status_codes
            )

        def enable_backoff_logs():
            # NOTE: this attaches a new StreamHandler on every request; guard against
            # duplicate handlers if request() is called repeatedly.
            logging.getLogger('backoff').addHandler(logging.StreamHandler())

        enable_backoff_logs()
        # Pop the wait generator once and share it between both decorators; popping
        # twice would silently fall back to ``backoff.constant`` the second time.
        wait_gen = backoff_strategy.pop('wait_gen', backoff.constant)
        decorated_request = backoff.on_exception(
            wait_gen,
            (error.ConnectionError, error.Timeout, error.TooManyRedirects, error.HttpError),
            max_time=self.backoff_max_time,
            giveup=fatal_code,
            **backoff_strategy
        )(self._request)

        if retry_when_empty_result:
            self.retry_when_empty_result = retry_when_empty_result
            decorated_request = backoff.on_predicate(
                wait_gen=wait_gen,
                predicate=self._response_does_not_have_data,
                max_time=self.backoff_max_time,
                **backoff_strategy
            )(decorated_request)

        return decorated_request(method, url, data=data,
                                 params=params, headers=headers,
                                 retry_count=retry_count,
                                 resource_name=resource_name)
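Examples 7 and 9 stack two decorators: ``backoff.on_exception`` retries on transport and HTTP errors, and ``backoff.on_predicate``, wrapped around the already-decorated function, re-issues requests whose response came back empty. A reduced sketch of that stacking order (the stubs are illustrative):

import backoff

_results = iter([{}, {"users": [1, 2]}])

def fetch():
    # Stub: first response is empty, second one has data.
    return next(_results)

def response_is_empty(ret):
    return not ret

# Inner decorator retries on exceptions; the outer one retries on empty responses.
fetch_with_retries = backoff.on_exception(backoff.expo, ConnectionError, max_time=5)(fetch)
fetch_with_retries = backoff.on_predicate(backoff.expo, response_is_empty, max_time=5)(fetch_with_retries)

print(fetch_with_retries())  # {'users': [1, 2]}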
Example 8
def _gzip_payload(headers, data, should_gzip):
    # Only gzip payloads larger than 2 KiB; smaller bodies aren't worth the overhead.
    if should_gzip and len(data) > 2048:
        headers['Content-Encoding'] = 'gzip'
        import gzip
        from io import BytesIO
        zipped_data = BytesIO()
        with gzip.GzipFile(filename='', mode='wb', fileobj=zipped_data) as f:
            f.write(data.encode())
        zipped_data.seek(0)

        return headers, zipped_data
    return headers, data


_exponential_backoff = backoff.on_predicate(
    backoff.expo,
    lambda response: response.status_code in [500, 502, 503],
    max_tries=5)


class JsonEncoder(json.JSONEncoder):

    def default(self, obj):
        if isinstance(obj, datetime.datetime):
            if obj.tzinfo is None:
                obj = obj.replace(tzinfo=pytz.UTC)
            return obj.isoformat()
        return super(JsonEncoder, self).default(obj)


def _encode_json(data):
    return json.dumps(data, cls=JsonEncoder)
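Example 8 defines the retry policy once at module level; ``_exponential_backoff`` can then decorate any function returning a response-like object, retrying up to five times while the status code is 500, 502, or 503. A brief usage sketch (the ``send`` function and its stubbed responses are made up):

from types import SimpleNamespace

import backoff

_exponential_backoff = backoff.on_predicate(
    backoff.expo,
    lambda response: response.status_code in [500, 502, 503],
    max_tries=5)

_codes = iter([503, 200])

@_exponential_backoff
def send():
    # Stub endpoint: one 503, then success.
    return SimpleNamespace(status_code=next(_codes))

print(send().status_code)  # 200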
Example 9
    def request(self, method, url, data=None, params=None, headers=None, retry_count=0, resource_name=None,
                retry_when_empty_result=None, backoff_strategy=None):

        if retry_when_empty_result:
            self.logger.warning('Passing `retry_when_empty_result` directly is deprecated '
                                'and will be removed in a future release; '
                                'pass it as a key of the `backoff_strategy` dictionary instead.')

        _backoff_strategy = copy.deepcopy(self.backoff_strategy) if not backoff_strategy else backoff_strategy
        _backoff_max_time = _backoff_strategy.pop('max_time', self.backoff_max_time)

        # Check for the key *before* popping it, otherwise this warning can never fire.
        if 'retry_when_empty_result' in _backoff_strategy and retry_when_empty_result:
            self.logger.warning('The `retry_when_empty_result` key within `backoff_strategy` takes precedence '
                                'over the direct argument, which will be deprecated in a future release.')
        _retry_when_empty_result = _backoff_strategy.pop('retry_when_empty_result', retry_when_empty_result)

        def http_status_codes_to_retry():
            retry_codes = _backoff_strategy.pop(RETRY_HTTP_STATUS_CODES, [])
            retry_codes = list(map(int, retry_codes)) if isinstance(retry_codes, (list, tuple)) else [int(retry_codes)]
            # Always retry on 429 (rate limited), without duplicating it.
            if 429 not in retry_codes:
                retry_codes.append(429)
            return retry_codes

        retry_http_status_codes = http_status_codes_to_retry()

        def fatal_code(e):
            # Give up immediately on auth errors and on non-retryable 4xx responses.
            return isinstance(e, error.AuthorisationError) or (
                isinstance(e, error.HttpError)
                and 400 <= e.status_code < 500
                and e.status_code not in retry_http_status_codes
            )

        def _response_does_not_have_data(ret):
            # Each flag is True when the corresponding resource came back empty.
            linked_resources_empty = [False]
            if 'linked' in ret.data and _retry_when_empty_result and isinstance(_retry_when_empty_result, dict):
                linked_resources_empty = [(k in ret.data['linked'] and not bool(ret.data['linked'][k]))
                                          for k, v in _retry_when_empty_result.items() if v]
            elif 'linked' in ret.data and _retry_when_empty_result:
                linked_resources_empty = [not bool(ret.data['linked'][k])
                                          for k in ret.data['linked'].keys()]

            main_resource_empty = ret.resource_name and not bool(ret.data[ret.resource_name])
            return main_resource_empty or any(linked_resources_empty)

        def backoff_handler(details):
            self.logger.warning('Retry `%s` (%.1fs) for args `%s` and kwargs `%s`', details['tries'], details['wait'],
                                details['args'], details['kwargs'])

        def backoff_giveup_handler(details):
            self.logger.error('Backoff giving up after %d tries', details['tries'])

        # Pop the wait generator once and share it between both decorators; popping
        # twice would silently fall back to ``backoff.constant`` the second time.
        _wait_gen = _backoff_strategy.pop('wait_gen', backoff.constant)

        decorated_request = backoff.on_exception(
            _wait_gen,
            (error.ConnectionError, error.Timeout, error.TooManyRedirects, error.HttpError),
            max_time=_backoff_max_time,
            giveup=fatal_code,
            on_backoff=backoff_handler,
            on_giveup=backoff_giveup_handler,
            **copy.deepcopy(_backoff_strategy)
        )(self._request)

        if _retry_when_empty_result:
            decorated_request = backoff.on_predicate(
                wait_gen=_wait_gen,
                predicate=_response_does_not_have_data,
                max_time=_backoff_max_time,
                on_backoff=backoff_handler,
                on_giveup=backoff_giveup_handler,
                **copy.deepcopy(_backoff_strategy)
            )(decorated_request)

        return decorated_request(method, url, data=data,
                                 params=params, headers=headers,
                                 retry_count=retry_count,
                                 resource_name=resource_name)
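The ``backoff_handler`` and ``backoff_giveup_handler`` above rely on the details dict that backoff passes to its event handlers; keys such as ``tries``, ``wait``, ``args`` and ``kwargs`` come from the library itself (``wait`` is only present for ``on_backoff``). A tiny standalone check of those handlers:

import backoff

def backoff_handler(details):
    print('retry %d, sleeping %.2f s' % (details['tries'], details['wait']))

def giveup_handler(details):
    print('gave up after %d tries' % details['tries'])

backoff.on_predicate(
    backoff.constant,
    interval=0.01,
    max_tries=3,
    jitter=None,
    on_backoff=backoff_handler,
    on_giveup=giveup_handler,
)(lambda: False)()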