Example 1
def build_retry_config(endpoint_prefix, retry_model, definitions):
    """Build the resolved retry configuration for a single service.

    :param endpoint_prefix: Endpoint prefix of the service whose retry
        configuration should be built.
    :param retry_model: The loaded retry model: a mapping of endpoint
        prefix to service-specific retry config, plus a ``__default__``
        section of global defaults.
    :param definitions: Shared definitions referenced by the retry
        model; resolved in place into the configs.
    :return: A dict containing the merged retry configuration, with the
        service-specific settings layered on top of the global
        ``__default__`` settings.
    """
    import copy

    service_config = retry_model.get(endpoint_prefix, {})
    resolve_references(service_config, definitions)
    # We want to merge the global defaults with the service specific
    # defaults, with the service specific defaults taking precedence.
    # So we use the global defaults as the base.
    #
    # Deep copy the global defaults: otherwise final_retry_config holds
    # a reference into retry_model, and the merge_dicts() call below
    # would mutate the caller's shared retry model in place.
    final_retry_config = {
        '__default__': copy.deepcopy(retry_model.get('__default__', {}))
    }
    resolve_references(final_retry_config, definitions)
    # Then merge the service specific config on top.
    merge_dicts(final_retry_config, service_config)
    return final_retry_config
Example 2
 def build_full_result(self):
     """Aggregate every page from this iterator into one response dict.

     Walks all pages, folding each configured result key's value into
     an accumulated result (lists are extended, scalars are summed or
     concatenated), then attaches the non-aggregated parts and, when
     pagination was truncated, the resume token as ``NextToken``.
     """
     complete_result = {}
     for response in self:
         # Operation-object pagination yields a two-element tuple of
         # (http_response, parsed_response); the page iterator only
         # needs the parsed portion.  This unwrapping can be removed
         # once operation objects are removed.
         if isinstance(response, tuple) and len(response) == 2:
             current_page = response[1]
         else:
             current_page = response
         # Incrementally fold each result key from the current page
         # into complete_result: look up what we have so far, look up
         # what this page contributes, and combine the two.
         for key_expression in self.result_keys:
             page_value = key_expression.search(current_page)
             if page_value is None:
                 continue
             accumulated = key_expression.search(complete_result)
             if accumulated is None:
                 # First page contributing this key; seed it directly.
                 set_value_from_jmespath(complete_result,
                                         key_expression.expression,
                                         page_value)
             elif isinstance(page_value, list):
                 # Lists aggregate by extending the existing value.
                 accumulated.extend(page_value)
             elif isinstance(page_value, (int, float, six.string_types)):
                 # Scalars aggregate by sum (numbers) or
                 # concatenation (strings).
                 set_value_from_jmespath(complete_result,
                                         key_expression.expression,
                                         accumulated + page_value)
     merge_dicts(complete_result, self.non_aggregate_part)
     if self.resume_token is not None:
         complete_result['NextToken'] = self.resume_token
     return complete_result
Example 3
def build_retry_config(endpoint_prefix,
                       retry_model,
                       definitions,
                       client_retry_config=None):
    """Construct the effective retry configuration for a service.

    The global ``__default__`` settings form the base; the
    service-specific settings are merged on top of them, and finally
    any client-provided retry configuration takes precedence over both.

    :param endpoint_prefix: Endpoint prefix of the target service.
    :param retry_model: Loaded retry model keyed by endpoint prefix,
        including the global ``__default__`` section.
    :param definitions: Shared definitions resolved into the configs.
    :param client_retry_config: Optional client-level retry overrides.
    :return: The fully merged retry configuration dict.
    """
    service_config = retry_model.get(endpoint_prefix, {})
    resolve_references(service_config, definitions)
    # Deep copying the global defaults guarantees the shared retry
    # model cannot be mutated when the service-specific (or client)
    # configuration is merged into it below.
    defaults = copy.deepcopy(retry_model.get('__default__', {}))
    final_retry_config = {'__default__': defaults}
    resolve_references(final_retry_config, definitions)
    # Layer the service-specific configuration over the defaults.
    merge_dicts(final_retry_config, service_config)
    # A client-supplied retry config, when present, wins over both.
    if client_retry_config is not None:
        _merge_client_retry_config(final_retry_config, client_retry_config)
    return final_retry_config
Example 4
 def _parse_error_from_body(self, response):
     """Parse an XML error body into the standard error response shape.

     Handles both the S3-style document (root tag ``Error``) and the
     generic rest-xml style, always returning a dict with an ``Error``
     key containing at least ``Message`` and ``Code``.
     """
     dom_root = self._parse_xml_string_to_dom(response['body'])
     parsed = self._build_name_to_xml_node(dom_root)
     self._replace_nodes(parsed)
     if dom_root.tag == 'Error':
         # S3-style error document.  Build the response metadata first;
         # RequestId and HostId already live there, so strip their
         # duplicates out of the parsed XML body.
         metadata = self._populate_response_metadata(response)
         parsed.pop('RequestId', '')
         parsed.pop('HostId', '')
         return {'Error': parsed, 'ResponseMetadata': metadata}
     if 'RequestId' in parsed:
         # Other rest-xml services carry the request id in the body.
         parsed['ResponseMetadata'] = {'RequestId': parsed.pop('RequestId')}
     # Guarantee Message/Code keys exist even when the body lacks them.
     default = {'Error': {'Message': '', 'Code': ''}}
     merge_dicts(default, parsed)
     return default
Example 5
    def _clone(self, **kwargs):
        """
        Return a copy of this collection with ``kwargs`` merged into
        its parameters.  Cloning (rather than mutating) is what lets
        the chainable methods below return independent copies:

            >>> base = collection.filter(Param1=1)
            >>> query1 = base.filter(Param2=2)
            >>> query2 = base.filter(Param3=3)
            >>> query1.params
            {'Param1': 1, 'Param2': 2}
            >>> query2.params
            {'Param1': 1, 'Param3': 3}

        :rtype: :py:class:`ResourceCollection`
        :return: A clone of this resource collection
        """
        # Deep copy so the clone's params never alias this instance's.
        merged_params = copy.deepcopy(self._params)
        merge_dicts(merged_params, kwargs, append_lists=True)
        return self.__class__(self._model, self._parent, self._handler,
                              **merged_params)
Example 6
    def pages(self):
        """
        A generator which yields pages of resource instances after
        doing the appropriate service operation calls and handling
        any pagination on your behalf. Non-paginated calls will
        return a single page of items.

        Page size, item limit, and filter parameters are applied
        if they have previously been set.

            >>> bucket = s3.Bucket('boto3')
            >>> for page in bucket.objects.pages():
            ...     for obj in page:
            ...         print(obj.key)
            'key1'
            'key2'

        :rtype: list(:py:class:`~boto3.resources.base.ServiceResource`)
        :return: List of resource instances
        """
        client = self._parent.meta.client
        # 'limit' and 'page_size' are consumed locally rather than sent
        # to the service, so pop them off a copy of the stored params.
        cleaned_params = self._params.copy()
        limit = cleaned_params.pop('limit', None)
        page_size = cleaned_params.pop('page_size', None)
        params = create_request_parameters(self._parent, self._model.request)
        merge_dicts(params, cleaned_params, append_lists=True)

        # Is this a paginated operation? If so, we need to get an
        # iterator for the various pages. If not, then we simply
        # call the operation and return the result as a single
        # page in a list. For non-paginated results, we just ignore
        # the page size parameter.
        if client.can_paginate(self._py_operation_name):
            logger.info('Calling paginated %s:%s with %r',
                        self._parent.meta.service_name,
                        self._py_operation_name, params)
            paginator = client.get_paginator(self._py_operation_name)
            pages = paginator.paginate(PaginationConfig={
                'MaxItems': limit,
                'PageSize': page_size
            },
                                       **params)
        else:
            logger.info('Calling %s:%s with %r',
                        self._parent.meta.service_name,
                        self._py_operation_name, params)
            pages = [getattr(client, self._py_operation_name)(**params)]

        # Now that we have a page iterator or single page of results
        # we start processing and yielding individual items.
        # 'count' tracks items across ALL pages so the limit applies
        # to the total, not per page.
        count = 0
        for page in pages:
            page_items = []
            for item in self._handler(self._parent, params, page):
                page_items.append(item)

                # If the limit is set and has been reached, then
                # we stop processing items here.
                count += 1
                if limit is not None and count >= limit:
                    break

            yield page_items

            # Stop reading pages once our item limit has been reached.
            if limit is not None and count >= limit:
                break