コード例 #1
0
ファイル: test_utils.py プロジェクト: zined/botocore
 def test_new_keys(self):
     # Keys that exist only in the second dict must still be merged
     # into the first, even when the two share no top-level keys.
     base = {'one': {'inner': 'ORIGINAL'}, 'two': {'inner': 'ORIGINAL'}}
     incoming = {'three': {'foo': {'bar': 'baz'}}}
     merge_dicts(base, incoming)
     self.assertEqual(base['three']['foo']['bar'], 'baz')
コード例 #2
0
    def _get_paginated_results(self, limit, marker, collection):
        """
        Use a Boto Paginator when one is available.  The paginated
        results are converted back into Boto resources by reaching into
        protected members of ResourceCollection; that workaround can be
        removed depending on https://github.com/boto/boto3/issues/1268.
        """
        # pylint:disable=protected-access
        request_params = collection._params.copy()
        request_params.pop('limit', None)
        request_params.pop('page_size', None)
        # pylint:disable=protected-access
        params = create_request_parameters(collection._parent,
                                           collection._model.request)
        merge_dicts(params, request_params, append_lists=True)

        list_op = self._get_list_operation()
        paginator = self.boto_conn.meta.client.get_paginator(list_op)

        pagination_config = {}
        if limit:
            pagination_config['MaxItems'] = limit
            pagination_config['PageSize'] = limit
        if marker:
            pagination_config['StartingToken'] = marker

        params['PaginationConfig'] = pagination_config
        args = trim_empty_params(params)
        pages = paginator.paginate(**args)
        # resume_token is not populated until the iterator is consumed,
        # which build_full_result() does.
        items = pages.build_full_result()

        boto_objs = self._to_boto_resource(collection, args, items)
        return (pages.resume_token, boto_objs)
コード例 #3
0
ファイル: utils.py プロジェクト: tmwong2003/moto
def merge_filters(filters_to_update, filters_to_merge):
    """Given two groups of filters, merge the second into the first.

    List values are appended instead of overwritten:

    >>> merge_filters({'filter-name': ['value1']}, {'filter-name': ['value2']})
    {'filter-name': ['value1', 'value2']}

    :param filters_to_update:
        The filters to update.  Mutated in place when not ``None``.
    :type filters_to_update:
        dict[str, list] or None
    :param filters_to_merge:
        The filters to merge.
    :type filters_to_merge:
        dict[str, list] or None
    :returns:
        The updated filters.
    :rtype:
        dict[str, list]
    """
    # Normalize None to an empty dict so merge_dicts always receives
    # two dicts; a caller-supplied dict is mutated and returned as-is.
    if filters_to_update is None:
        filters_to_update = {}
    if filters_to_merge is None:
        filters_to_merge = {}
    merge_dicts(filters_to_update, filters_to_merge, append_lists=True)
    return filters_to_update
コード例 #4
0
ファイル: translate.py プロジェクト: lolo-pop/dast
def translate(model):
    """Return a new service model dict with all enhancements applied.

    Builds from a deep copy of ``model.model``, then layers on extras,
    renames, deprecation removals, documentation filtering, pagination,
    waiter, and retry configuration from ``model.enhancements`` and
    ``model.retry``.

    :param model: An object exposing ``model``, ``enhancements`` and
        ``retry`` attributes (dict-like payloads).
    :returns: The translated model dict.
    """
    new_model = deepcopy(model.model)
    new_model.update(model.enhancements.get('extra', {}))
    # Pagination config is regenerated below from the enhancements, so
    # drop any stale top-level entry.  ``pop`` with a default replaces
    # the original try/del/except KeyError dance.
    new_model.pop('pagination', None)
    handle_op_renames(new_model, model.enhancements)
    handle_remove_deprecated_params(new_model, model.enhancements)
    handle_remove_deprecated_operations(new_model, model.enhancements)
    handle_filter_documentation(new_model, model.enhancements)
    handle_rename_params(new_model, model.enhancements)
    add_pagination_configs(
        new_model,
        model.enhancements.get('pagination', {}))
    add_waiter_configs(
        new_model,
        model.enhancements.get('waiters', {}))
    # Merge in any per operation overrides defined in the .extras.json file.
    merge_dicts(new_model['operations'],
                model.enhancements.get('operations', {}))
    add_retry_configs(
        new_model, model.retry.get('retry', {}),
        definitions=model.retry.get('definitions', {}))
    return new_model
コード例 #5
0
ファイル: paginate.py プロジェクト: j3tm0t0/botocore
 def build_full_result(self):
     """Consume every page and aggregate the result keys into a single
     response dict, merging in the non-aggregated parts and exposing
     the resume token (when set) as ``NextToken``.
     """
     full_result = {}
     # Seed each result key with an empty list so there is always an
     # existing list to extend below.
     for expr in self.result_keys:
         set_value_from_jmespath(full_result, expr.expression, [])
     for _, page in self:
         # Page by page, append the values found under each result key
         # onto the accumulated list held in full_result.
         for expr in self.result_keys:
             page_values = expr.search(page)
             if page_values is None:
                 continue
             expr.search(full_result).extend(page_values)
     merge_dicts(full_result, self.non_aggregate_part)
     if self.resume_token is not None:
         full_result["NextToken"] = self.resume_token
     return full_result
コード例 #6
0
 def build_full_result(self):
     """Consume every page and return one aggregated response dict.

     Each result key accumulates into a single list across pages; the
     non-aggregated parts are merged in afterwards, and the resume
     token (when set) is exposed as ``NextToken``.
     """
     complete_result = {}
     # Prepopulate the result keys with an empty list.
     for result_expression in self.result_keys:
         set_value_from_jmespath(complete_result,
                                 result_expression.expression, [])
     for _, page in self:
         # We're incrementally building the full response page
         # by page.  For each page in the response we need to
         # inject the necessary components from the page
         # into the complete_result.
         for result_expression in self.result_keys:
             # In order to incrementally update a result key
             # we need to search the existing value from complete_result,
             # then we need to search the _current_ page for the
             # current result key value.  Then we append the current
             # value onto the existing value, and re-set that value
             # as the new value.
             existing_value = result_expression.search(complete_result)
             result_value = result_expression.search(page)
             if result_value is not None:
                 existing_value.extend(result_value)
     merge_dicts(complete_result, self.non_aggregate_part)
     if self.resume_token is not None:
         complete_result['NextToken'] = self.resume_token
     return complete_result
コード例 #7
0
ファイル: test_utils.py プロジェクト: rayluo/botocore
 def test_new_keys(self):
     """merge_dicts adds keys from ``second`` that ``first`` lacks."""
     first = {"one": {"inner": "ORIGINAL"}, "two": {"inner": "ORIGINAL"}}
     second = {"three": {"foo": {"bar": "baz"}}}
     # In this case, second has no keys in common, but we'd still expect
     # this to get merged.
     merge_dicts(first, second)
     self.assertEqual(first["three"]["foo"]["bar"], "baz")
コード例 #8
0
ファイル: paginate.py プロジェクト: nooptr/dynamodb
 def build_full_result(self):
     """Consume every page and return one aggregated response dict.

     Also handles legacy operation-object pagination, where each item
     yielded is an ``(http_response, parsed_response)`` tuple rather
     than a parsed page.
     """
     merged = {}
     # Start every result key off as an empty list so extend() below
     # always has a target.
     for expr in self.result_keys:
         set_value_from_jmespath(merged, expr.expression, [])
     for response in self:
         # Operation-object pagination yields a two-element tuple of
         # (http_response, parsed_response); the parsed response is the
         # page we actually want.  This branch can go away once
         # operation objects are removed.
         if isinstance(response, tuple) and len(response) == 2:
             page = response[1]
         else:
             page = response
         # Append this page's values for each result key onto the
         # accumulated lists in merged.
         for expr in self.result_keys:
             page_values = expr.search(page)
             if page_values is not None:
                 expr.search(merged).extend(page_values)
     merge_dicts(merged, self.non_aggregate_part)
     if self.resume_token is not None:
         merged['NextToken'] = self.resume_token
     return merged
コード例 #9
0
ファイル: paginate.py プロジェクト: vchan/botocore
 def build_full_result(self):
     """Consume every page and return one aggregated response dict.

     Handles both plain parsed pages and legacy operation-object
     pagination, where each yielded item is an ``(http_response,
     parsed_response)`` tuple.
     """
     complete_result = {}
     # Prepopulate the result keys with an empty list.
     for result_expression in self.result_keys:
         set_value_from_jmespath(complete_result,
                                 result_expression.expression, [])
     for response in self:
         page = response
         # We want to try to catch operation object pagination
         # and format correctly for those. They come in the form
         # of a tuple of two elements: (http_response, parsed_responsed).
         # We want the parsed_response as that is what the page iterator
         # uses. We can remove it though once operation objects are removed.
         if isinstance(response, tuple) and len(response) == 2:
             page = response[1]
         # We're incrementally building the full response page
         # by page.  For each page in the response we need to
         # inject the necessary components from the page
         # into the complete_result.
         for result_expression in self.result_keys:
             # In order to incrementally update a result key
             # we need to search the existing value from complete_result,
             # then we need to search the _current_ page for the
             # current result key value.  Then we append the current
             # value onto the existing value, and re-set that value
             # as the new value.
             existing_value = result_expression.search(complete_result)
             result_value = result_expression.search(page)
             if result_value is not None:
                 existing_value.extend(result_value)
     merge_dicts(complete_result, self.non_aggregate_part)
     if self.resume_token is not None:
         complete_result['NextToken'] = self.resume_token
     return complete_result
コード例 #10
0
ファイル: test_utils.py プロジェクト: CloverHealth/botocore
 def test_new_keys(self):
     """merge_dicts adds keys from ``second`` that ``first`` lacks."""
     first = {'one': {'inner': 'ORIGINAL'}, 'two': {'inner': 'ORIGINAL'}}
     second = {'three': {'foo': {'bar': 'baz'}}}
     # In this case, second has no keys in common, but we'd still expect
     # this to get merged.
     merge_dicts(first, second)
     self.assertEqual(first['three']['foo']['bar'], 'baz')
コード例 #11
0
ファイル: test_utils.py プロジェクト: rayluo/botocore
    def test_merge_dicts_new_keys(self):
        # A key that exists only deep inside the update dict is added
        # at the correct depth, leaving the existing leaves untouched.
        target = {"foo": {"bar": {"baz": {"one": "ORIGINAL", "two": "ORIGINAL"}}}}
        updates = {"foo": {"bar": {"baz": {"three": "UPDATE"}}}}

        merge_dicts(target, updates)
        leaf = target["foo"]["bar"]["baz"]
        self.assertEqual(leaf["one"], "ORIGINAL")
        self.assertEqual(leaf["two"], "ORIGINAL")
        self.assertEqual(leaf["three"], "UPDATE")
コード例 #12
0
ファイル: test_utils.py プロジェクト: rayluo/botocore
    def test_merge_dicts_overrides(self):
        # When the same leaf key exists in both dicts, the value from
        # the update dict wins, and sibling keys are preserved.
        target = {"foo": {"bar": {"baz": {"one": "ORIGINAL", "two": "ORIGINAL"}}}}
        updates = {"foo": {"bar": {"baz": {"one": "UPDATE"}}}}

        merge_dicts(target, updates)
        leaf = target["foo"]["bar"]["baz"]
        self.assertEqual(leaf["one"], "UPDATE")
        self.assertEqual(leaf["two"], "ORIGINAL")
コード例 #13
0
ファイル: test_utils.py プロジェクト: CloverHealth/botocore
    def test_merge_dicts_new_keys(self):
        """New nested keys from ``second`` are added at the correct
        depth while existing leaves are left untouched."""
        first = {
            'foo': {'bar': {'baz': {'one': 'ORIGINAL', 'two': 'ORIGINAL'}}}}
        second = {'foo': {'bar': {'baz': {'three': 'UPDATE'}}}}

        merge_dicts(first, second)
        self.assertEqual(first['foo']['bar']['baz']['one'], 'ORIGINAL')
        self.assertEqual(first['foo']['bar']['baz']['two'], 'ORIGINAL')
        self.assertEqual(first['foo']['bar']['baz']['three'], 'UPDATE')
コード例 #14
0
ファイル: test_utils.py プロジェクト: CloverHealth/botocore
    def test_merge_dicts_overrides(self):
        """A leaf key present in both dicts takes the value from
        ``second``; sibling keys survive the merge."""
        first = {
            'foo': {'bar': {'baz': {'one': 'ORIGINAL', 'two': 'ORIGINAL'}}}}
        second = {'foo': {'bar': {'baz': {'one': 'UPDATE'}}}}

        merge_dicts(first, second)
        # The value from the second dict wins.
        self.assertEqual(first['foo']['bar']['baz']['one'], 'UPDATE')
        # And we still preserve the other attributes.
        self.assertEqual(first['foo']['bar']['baz']['two'], 'ORIGINAL')
コード例 #15
0
ファイル: test_utils.py プロジェクト: rayluo/botocore
    def test_more_than_one_sub_dict(self):
        # Each top-level sub-dict is merged independently: updated keys
        # win, untouched keys survive.
        target = {
            "one": {"inner": "ORIGINAL", "inner2": "ORIGINAL"},
            "two": {"inner": "ORIGINAL", "inner2": "ORIGINAL"},
        }
        updates = {"one": {"inner": "UPDATE"}, "two": {"inner": "UPDATE"}}

        merge_dicts(target, updates)
        for key in ("one", "two"):
            self.assertEqual(target[key]["inner"], "UPDATE")
            self.assertEqual(target[key]["inner2"], "ORIGINAL")
コード例 #16
0
ファイル: test_utils.py プロジェクト: CloverHealth/botocore
    def test_more_than_one_sub_dict(self):
        """Every top-level sub-dict is merged independently: updated
        keys win, untouched keys survive."""
        first = {'one': {'inner': 'ORIGINAL', 'inner2': 'ORIGINAL'},
                 'two': {'inner': 'ORIGINAL', 'inner2': 'ORIGINAL'}}
        second = {'one': {'inner': 'UPDATE'}, 'two': {'inner': 'UPDATE'}}

        merge_dicts(first, second)
        self.assertEqual(first['one']['inner'], 'UPDATE')
        self.assertEqual(first['one']['inner2'], 'ORIGINAL')

        self.assertEqual(first['two']['inner'], 'UPDATE')
        self.assertEqual(first['two']['inner2'], 'ORIGINAL')
コード例 #17
0
def build_retry_config(endpoint_prefix, retry_model, definitions):
    """Build the merged retry configuration for one service.

    :param endpoint_prefix: The service's endpoint prefix, used as the
        lookup key into ``retry_model``.
    :param retry_model: Mapping of endpoint prefix to per-service retry
        config, plus a global ``__default__`` entry.
    :param definitions: Shared definitions expanded into the configs
        via ``resolve_references``.
    :returns: A dict with the global defaults under ``__default__`` and
        the service specific config merged on top.
    """
    service_config = retry_model.get(endpoint_prefix, {})
    resolve_references(service_config, definitions)
    # We want to merge the global defaults with the service specific
    # defaults, with the service specific defaults taking precedence.
    # So we use the global defaults as the base.
    final_retry_config = {'__default__': retry_model.get('__default__', {})}
    resolve_references(final_retry_config, definitions)
    # Then merge the service specific config on top.
    merge_dicts(final_retry_config, service_config)
    return final_retry_config
コード例 #18
0
ファイル: translate.py プロジェクト: jstewmon/botocore
def build_retry_config(endpoint_prefix, retry_model, definitions):
    """Build the merged retry configuration for one service.

    Global defaults (``__default__``) form the base; the service
    specific config, looked up by ``endpoint_prefix``, is merged on
    top after shared ``definitions`` are resolved into both.
    """
    service_config = retry_model.get(endpoint_prefix, {})
    resolve_references(service_config, definitions)
    # We want to merge the global defaults with the service specific
    # defaults, with the service specific defaults taking precedence.
    # So we use the global defaults as the base.
    final_retry_config = {"__default__": retry_model.get("__default__", {})}
    resolve_references(final_retry_config, definitions)
    # Then merge the service specific config on top.
    merge_dicts(final_retry_config, service_config)
    return final_retry_config
コード例 #19
0
    async def pages(self):
        """Asynchronously yield one list of resource items per page.

        Uses a paginator when the operation supports pagination;
        otherwise performs a single call and treats the response as a
        one-page result.  ``limit`` caps the total number of items
        yielded across all pages.
        """
        client = self._parent.meta.client
        # limit/page_size are consumed here and must not be forwarded
        # as request parameters.
        cleaned_params = self._params.copy()
        limit = cleaned_params.pop('limit', None)
        page_size = cleaned_params.pop('page_size', None)
        params = create_request_parameters(
            self._parent, self._model.request)
        merge_dicts(params, cleaned_params, append_lists=True)

        # Is this a paginated operation? If so, we need to get an
        # iterator for the various pages. If not, then we simply
        # call the operation and return the result as a single
        # page in a list. For non-paginated results, we just ignore
        # the page size parameter.
        if client.can_paginate(self._py_operation_name):
            logger.debug('Calling paginated %s:%s with %r',
                         self._parent.meta.service_name,
                         self._py_operation_name, params)
            paginator = client.get_paginator(self._py_operation_name)
            pages = paginator.paginate(
                PaginationConfig={
                    'MaxItems': limit, 'PageSize': page_size}, **params)
        else:
            # Wrap the single response in an async generator so the
            # consumption loop below can treat both cases uniformly.
            @async_generator
            async def _aiopaginatordummy():
                res = await getattr(client, self._py_operation_name)(**params)
                await yield_(res)

            logger.debug('Calling %s:%s with %r',
                         self._parent.meta.service_name,
                         self._py_operation_name, params)
            pages = _aiopaginatordummy()

        # Now that we have a page iterator or single page of results
        # we start processing and yielding individual items.
        count = 0
        async for page in pages:
            page_items = []
            for item in self._handler(self._parent, params, page):
                page_items.append(item)

                # If the limit is set and has been reached, then
                # we stop processing items here.
                count += 1
                if limit is not None and count >= limit:
                    break

            await yield_(page_items)

            # Stop reading pages if we've reached our limit
            if limit is not None and count >= limit:
                break
コード例 #20
0
ファイル: paginate.py プロジェクト: ticosax/aiobotocore
 def build_full_result(self):
     """Drive the paginator to exhaustion and aggregate all pages.

     Unlike the simple list-extend variant, this also aggregates
     scalar result values (summing numbers, concatenating strings) and
     lazily initializes each result key from the first page that
     contains it.  Implemented as a ``yield from`` coroutine driving
     ``self.next_page()``.
     """
     complete_result = {}
     while True:
         response = yield from self.next_page()
         if response is None:
             break
         page = response
         # We want to try to catch operation object pagination
         # and format correctly for those. They come in the form
         # of a tuple of two elements: (http_response, parsed_responsed).
         # We want the parsed_response as that is what the page iterator
         # uses. We can remove it though once operation objects are removed.
         if isinstance(response, tuple) and len(response) == 2:
             page = response[1]
         # We're incrementally building the full response page
         # by page.  For each page in the response we need to
         # inject the necessary components from the page
         # into the complete_result.
         for result_expression in self.result_keys:
             # In order to incrementally update a result key
             # we need to search the existing value from complete_result,
             # then we need to search the _current_ page for the
             # current result key value.  Then we append the current
             # value onto the existing value, and re-set that value
             # as the new value.
             result_value = result_expression.search(page)
             if result_value is None:
                 continue
             existing_value = result_expression.search(complete_result)
             if existing_value is None:
                 # Set the initial result
                 set_value_from_jmespath(complete_result,
                                         result_expression.expression,
                                         result_value)
                 continue
             # Now both result_value and existing_value contain something
             if isinstance(result_value, list):
                 existing_value.extend(result_value)
             elif isinstance(result_value, (int, float, str)):
                 # Modify the existing result with the sum or concatenation
                 set_value_from_jmespath(complete_result,
                                         result_expression.expression,
                                         existing_value + result_value)
     merge_dicts(complete_result, self.non_aggregate_part)
     if self.resume_token is not None:
         complete_result['NextToken'] = self.resume_token
     return complete_result
コード例 #21
0
ファイル: paginate.py プロジェクト: Jc2k/aiobotocore
 def build_full_result(self):
     """Drive the paginator to exhaustion and return one merged result.

     List values are extended page by page; scalar values (int, float,
     str) are combined with ``+`` (sum or concatenation).  Result keys
     are initialized lazily from the first page that contains them.
     """
     complete_result = {}
     while True:
         response = yield from self.next_page()
         if response is None:
             break
         page = response
         # We want to try to catch operation object pagination
         # and format correctly for those. They come in the form
         # of a tuple of two elements: (http_response, parsed_responsed).
         # We want the parsed_response as that is what the page iterator
         # uses. We can remove it though once operation objects are removed.
         if isinstance(response, tuple) and len(response) == 2:
             page = response[1]
         # We're incrementally building the full response page
         # by page.  For each page in the response we need to
         # inject the necessary components from the page
         # into the complete_result.
         for result_expression in self.result_keys:
             # In order to incrementally update a result key
             # we need to search the existing value from complete_result,
             # then we need to search the _current_ page for the
             # current result key value.  Then we append the current
             # value onto the existing value, and re-set that value
             # as the new value.
             result_value = result_expression.search(page)
             if result_value is None:
                 continue
             existing_value = result_expression.search(complete_result)
             if existing_value is None:
                 # Set the initial result
                 set_value_from_jmespath(
                     complete_result, result_expression.expression,
                     result_value)
                 continue
             # Now both result_value and existing_value contain something
             if isinstance(result_value, list):
                 existing_value.extend(result_value)
             elif isinstance(result_value, (int, float, str)):
                 # Modify the existing result with the sum or concatenation
                 set_value_from_jmespath(
                     complete_result, result_expression.expression,
                     existing_value + result_value)
     merge_dicts(complete_result, self.non_aggregate_part)
     if self.resume_token is not None:
         complete_result['NextToken'] = self.resume_token
     return complete_result
コード例 #22
0
def add_retry_configs(new_model, retry_model, definitions):
    """Attach the merged retry configuration to ``new_model['retry']``.

    Global defaults (``__default__``) form the base; the service
    specific config, keyed by the model's endpoint prefix, is merged
    on top.  An empty ``retry_model`` yields an empty config.
    """
    if not retry_model:
        new_model['retry'] = {}
        return
    # The service specific retry config is keyed off of the endpoint
    # prefix as defined in the JSON model.
    endpoint_prefix = new_model.get('endpoint_prefix', '')
    service_config = retry_model.get(endpoint_prefix, {})
    resolve_references(service_config, definitions)
    # We want to merge the global defaults with the service specific
    # defaults, with the service specific defaults taking precedence.
    # So we use the global defaults as the base.
    final_retry_config = {'__default__': retry_model.get('__default__', {})}
    resolve_references(final_retry_config, definitions)
    # Then merge the service specific config on top.
    merge_dicts(final_retry_config, service_config)
    new_model['retry'] = final_retry_config
コード例 #23
0
ファイル: test_utils.py プロジェクト: zined/botocore
    def test_merge_dicts_new_keys(self):
        # A key that exists only in ``second`` is merged in at the
        # correct depth while existing leaves are preserved.
        first = {
            'foo': {'bar': {'baz': {'one': 'ORIGINAL',
                                    'two': 'ORIGINAL'}}}}
        second = {'foo': {'bar': {'baz': {'three': 'UPDATE'}}}}

        merge_dicts(first, second)
        baz = first['foo']['bar']['baz']
        self.assertEqual(baz['one'], 'ORIGINAL')
        self.assertEqual(baz['two'], 'ORIGINAL')
        self.assertEqual(baz['three'], 'UPDATE')
コード例 #24
0
ファイル: translate.py プロジェクト: j3tm0t0/botocore
def add_retry_configs(new_model, retry_model, definitions):
    """Attach the merged retry configuration to ``new_model["retry"]``.

    Global defaults (``__default__``) form the base; the service
    specific config, keyed by the model's endpoint prefix, is merged
    on top.  An empty ``retry_model`` yields an empty config.
    """
    if not retry_model:
        new_model["retry"] = {}
        return
    # The service specific retry config is keyed off of the endpoint
    # prefix as defined in the JSON model.
    endpoint_prefix = new_model.get("endpoint_prefix", "")
    service_config = retry_model.get(endpoint_prefix, {})
    resolve_references(service_config, definitions)
    # We want to merge the global defaults with the service specific
    # defaults, with the service specific defaults taking precedence.
    # So we use the global defaults as the base.
    final_retry_config = {"__default__": retry_model.get("__default__", {})}
    resolve_references(final_retry_config, definitions)
    # Then merge the service specific config on top.
    merge_dicts(final_retry_config, service_config)
    new_model["retry"] = final_retry_config
コード例 #25
0
ファイル: test_utils.py プロジェクト: zined/botocore
    def test_merge_dicts_overrides(self):
        """A leaf key present in both dicts takes the value from
        ``second``; sibling keys survive the merge."""
        first = {
            'foo': {
                'bar': {
                    'baz': {
                        'one': 'ORIGINAL',
                        'two': 'ORIGINAL'
                    }
                }
            }
        }
        second = {'foo': {'bar': {'baz': {'one': 'UPDATE'}}}}

        merge_dicts(first, second)
        # The value from the second dict wins.
        self.assertEqual(first['foo']['bar']['baz']['one'], 'UPDATE')
        # And we still preserve the other attributes.
        self.assertEqual(first['foo']['bar']['baz']['two'], 'ORIGINAL')
コード例 #26
0
ファイル: test_utils.py プロジェクト: zined/botocore
    def test_more_than_one_sub_dict(self):
        """Every top-level sub-dict is merged independently: updated
        keys win, untouched keys survive."""
        first = {
            'one': {
                'inner': 'ORIGINAL',
                'inner2': 'ORIGINAL'
            },
            'two': {
                'inner': 'ORIGINAL',
                'inner2': 'ORIGINAL'
            }
        }
        second = {'one': {'inner': 'UPDATE'}, 'two': {'inner': 'UPDATE'}}

        merge_dicts(first, second)
        self.assertEqual(first['one']['inner'], 'UPDATE')
        self.assertEqual(first['one']['inner2'], 'ORIGINAL')

        self.assertEqual(first['two']['inner'], 'UPDATE')
        self.assertEqual(first['two']['inner2'], 'ORIGINAL')
コード例 #27
0
ファイル: translate.py プロジェクト: boto/botocore
def build_retry_config(endpoint_prefix, retry_model, definitions,
                       client_retry_config=None):
    """Build the merged retry configuration for one service.

    Global defaults form the base, the service specific config is
    merged on top, and finally an optional client-supplied retry
    config is merged via ``_merge_client_retry_config``.

    :param endpoint_prefix: Lookup key into ``retry_model``.
    :param retry_model: Mapping of endpoint prefix to retry config,
        plus a global ``__default__`` entry.
    :param definitions: Shared definitions resolved into the configs.
    :param client_retry_config: Optional client-level retry overrides.
    :returns: The final merged retry config dict.
    """
    service_config = retry_model.get(endpoint_prefix, {})
    resolve_references(service_config, definitions)
    # We want to merge the global defaults with the service specific
    # defaults, with the service specific defaults taking precedence.
    # So we use the global defaults as the base.
    #
    # A deepcopy is done on the retry defaults because it ensures the
    # retry model has no chance of getting mutated when the service specific
    # configuration or client retry config is merged in.
    final_retry_config = {
        '__default__': copy.deepcopy(retry_model.get('__default__', {}))
    }
    resolve_references(final_retry_config, definitions)
    # Then merge the service specific config on top.
    merge_dicts(final_retry_config, service_config)
    if client_retry_config is not None:
        _merge_client_retry_config(final_retry_config, client_retry_config)
    return final_retry_config
コード例 #28
0
ファイル: collection.py プロジェクト: thedrow/boto3
    def _clone(self, **kwargs):
        """
        Return a copy of this collection with ``kwargs`` merged into a
        deep copy of the current request parameters.  This powers the
        chainable interface where each call yields a fresh collection:

            >>> base = collection.filter(Param1=1)
            >>> query1 = base.filter(Param2=2)
            >>> query2 = base.filter(Param3=3)
            >>> query1.params
            {'Param1': 1, 'Param2': 2}
            >>> query2.params
            {'Param1': 1, 'Param3': 3}

        :rtype: :py:class:`ResourceCollection`
        :return: A clone of this resource collection
        """
        merged_params = copy.deepcopy(self._params)
        merge_dicts(merged_params, kwargs, append_lists=True)
        return self.__class__(self._model, self._parent, self._handler,
                              **merged_params)
コード例 #29
0
ファイル: parsers.py プロジェクト: bmk10/GridMateBook_1.18
 def _parse_error_from_body(self, response):
     """Parse an error structure out of an XML response body.

     Handles both the S3 style (root tag ``Error``) and the generic
     rest-xml style.  Always returns a dict whose ``Error`` value has
     at least ``Message`` and ``Code`` keys.
     """
     xml_contents = response['body']
     root = self._parse_xml_string_to_dom(xml_contents)
     parsed = self._build_name_to_xml_node(root)
     self._replace_nodes(parsed)
     if root.tag == 'Error':
         # This is an S3 error response.  First we'll populate the
         # response metadata.
         metadata = self._populate_response_metadata(response)
         # The RequestId and the HostId are already in the
         # ResponseMetadata, but are also duplicated in the XML
         # body.  We don't need these values in both places,
         # we'll just remove them from the parsed XML body.
         parsed.pop('RequestId', '')
         parsed.pop('HostId', '')
         return {'Error': parsed, 'ResponseMetadata': metadata}
     elif 'RequestId' in parsed:
         # Other rest-xml services:
         parsed['ResponseMetadata'] = {'RequestId': parsed.pop('RequestId')}
     # Guarantee Message/Code keys exist even when the body omits them.
     default = {'Error': {'Message': '', 'Code': ''}}
     merge_dicts(default, parsed)
     return default
コード例 #30
0
def build_retry_config(endpoint_prefix,
                       retry_model,
                       definitions,
                       client_retry_config=None):
    """Build the effective retry configuration for one service.

    Precedence, lowest to highest: the retry model's global
    ``__default__`` settings, then the service-specific settings, then
    the client-supplied retry config.
    """
    # Start from a deep copy of the global defaults so the shared retry
    # model cannot be mutated by the merges below.
    final_retry_config = {
        '__default__': copy.deepcopy(retry_model.get('__default__', {}))
    }
    resolve_references(final_retry_config, definitions)

    # Then layer the service-specific configuration on top.
    service_config = retry_model.get(endpoint_prefix, {})
    resolve_references(service_config, definitions)
    merge_dicts(final_retry_config, service_config)

    if client_retry_config is not None:
        _merge_client_retry_config(final_retry_config, client_retry_config)
    return final_retry_config
コード例 #31
0
ファイル: collection.py プロジェクト: Baba7080/electr
    def _clone(self, **kwargs):
        """
        Produce an independent copy of this collection whose parameters
        are this collection's parameters plus ``kwargs``. Enables a
        chainable interface where each filter call returns a new object:

            >>> base = collection.filter(Param1=1)
            >>> query1 = base.filter(Param2=2)
            >>> query2 = base.filter(Param3=3)
            >>> query1.params
            {'Param1': 1, 'Param2': 2}
            >>> query2.params
            {'Param1': 1, 'Param3': 3}

        :rtype: :py:class:`ResourceCollection`
        :return: A clone of this resource collection
        """
        # deepcopy guards the original's nested params from later mutation
        new_params = copy.deepcopy(self._params)
        merge_dicts(new_params, kwargs, append_lists=True)
        clone = self.__class__(
            self._model, self._parent, self._handler, **new_params
        )
        return clone
コード例 #32
0
ファイル: test_utils.py プロジェクト: rayluo/botocore
 def test_merge_empty_dict_does_nothing(self):
     """Merging an empty source dict leaves the destination unchanged."""
     destination = {"foo": {"bar": "baz"}}
     merge_dicts(destination, {})
     self.assertEqual(destination, {"foo": {"bar": "baz"}})
コード例 #33
0
ファイル: test_utils.py プロジェクト: CloverHealth/botocore
 def test_list_values_missing_key(self):
     """A list value is copied over when the destination lacks the key."""
     destination = {}
     source = {'Foo': ['foo_value']}
     merge_dicts(destination, source, append_lists=True)
     self.assertEqual(destination, {'Foo': ['foo_value']})
コード例 #34
0
ファイル: test_utils.py プロジェクト: CloverHealth/botocore
 def test_list_values_mismatching_types(self):
     """A source list replaces a destination value that is not a list."""
     destination = {'Foo': 'old_foo_value'}
     source = {'Foo': ['new_foo_value']}
     merge_dicts(destination, source, append_lists=True)
     self.assertEqual(destination, {'Foo': ['new_foo_value']})
コード例 #35
0
ファイル: test_utils.py プロジェクト: CloverHealth/botocore
 def test_list_values_append(self):
     """With append_lists=True, source items extend the destination list."""
     destination = {'Foo': ['old_foo_value']}
     source = {'Foo': ['new_foo_value']}
     merge_dicts(destination, source, append_lists=True)
     self.assertEqual(
         destination, {'Foo': ['old_foo_value', 'new_foo_value']})
コード例 #36
0
ファイル: test_utils.py プロジェクト: rayluo/botocore
 def test_list_values_append(self):
     """append_lists=True concatenates matching list values in order."""
     dst = {"Foo": ["old_foo_value"]}
     src = {"Foo": ["new_foo_value"]}
     merge_dicts(dst, src, append_lists=True)
     self.assertEqual(dst, {"Foo": ["old_foo_value", "new_foo_value"]})
コード例 #37
0
ファイル: test_utils.py プロジェクト: rayluo/botocore
 def test_list_values_mismatching_types(self):
     """When types differ, the source list overwrites the old value."""
     dst = {"Foo": "old_foo_value"}
     src = {"Foo": ["new_foo_value"]}
     merge_dicts(dst, src, append_lists=True)
     self.assertEqual(dst, {"Foo": ["new_foo_value"]})
コード例 #38
0
ファイル: test_utils.py プロジェクト: CloverHealth/botocore
 def test_merge_empty_dict_does_nothing(self):
     """An empty source dict must not alter the destination."""
     dst = {'foo': {'bar': 'baz'}}
     merge_dicts(dst, {})
     self.assertEqual(dst, {'foo': {'bar': 'baz'}})
コード例 #39
0
ファイル: test_utils.py プロジェクト: rayluo/botocore
 def test_list_values_missing_key(self):
     """A key absent from the destination is added with its list value."""
     dst = {}
     src = {"Foo": ["foo_value"]}
     merge_dicts(dst, src, append_lists=True)
     self.assertEqual(dst, {"Foo": ["foo_value"]})
コード例 #40
0
ファイル: test_utils.py プロジェクト: zined/botocore
 def test_merge_empty_dict_does_nothing(self):
     """Merging {} into a populated dict is a no-op."""
     target = {'foo': {'bar': 'baz'}}
     merge_dicts(target, {})
     self.assertEqual(target, {'foo': {'bar': 'baz'}})
コード例 #41
0
ファイル: collection.py プロジェクト: Baba7080/electr
    def pages(self):
        """
        A generator which yields pages of resource instances after
        doing the appropriate service operation calls and handling
        any pagination on your behalf. Non-paginated calls will
        return a single page of items.

        Page size, item limit, and filter parameters are applied
        if they have previously been set.

            >>> bucket = s3.Bucket('boto3')
            >>> for page in bucket.objects.pages():
            ...     for obj in page:
            ...         print(obj.key)
            'key1'
            'key2'

        :rtype: list(:py:class:`~boto3.resources.base.ServiceResource`)
        :return: List of resource instances
        """
        client = self._parent.meta.client
        # 'limit' and 'page_size' drive pagination locally; they must not
        # be forwarded to the service as request parameters.
        cleaned_params = self._params.copy()
        limit = cleaned_params.pop('limit', None)
        page_size = cleaned_params.pop('page_size', None)
        params = create_request_parameters(self._parent, self._model.request)
        merge_dicts(params, cleaned_params, append_lists=True)

        # Is this a paginated operation? If so, we need to get an
        # iterator for the various pages. If not, then we simply
        # call the operation and return the result as a single
        # page in a list. For non-paginated results, we just ignore
        # the page size parameter.
        if client.can_paginate(self._py_operation_name):
            logger.debug('Calling paginated %s:%s with %r',
                         self._parent.meta.service_name,
                         self._py_operation_name, params)
            paginator = client.get_paginator(self._py_operation_name)
            pages = paginator.paginate(PaginationConfig={
                'MaxItems': limit,
                'PageSize': page_size
            },
                                       **params)
        else:
            logger.debug('Calling %s:%s with %r',
                         self._parent.meta.service_name,
                         self._py_operation_name, params)
            pages = [getattr(client, self._py_operation_name)(**params)]

        # Now that we have a page iterator or single page of results
        # we start processing and yielding individual items.
        count = 0
        for page in pages:
            page_items = []
            for item in self._handler(self._parent, params, page):
                page_items.append(item)

                # If the limit is set and has been reached, then
                # we stop processing items here.
                count += 1
                if limit is not None and count >= limit:
                    break

            # Yield the (possibly truncated) page of resource instances.
            yield page_items

            # Stop reading pages once we've reached our limit.
            if limit is not None and count >= limit:
                break
コード例 #42
0
ファイル: test_utils.py プロジェクト: zined/botocore
 def test_list_values_append(self):
     """Source list items are appended after the existing ones."""
     target = {'Foo': ['old_foo_value']}
     incoming = {'Foo': ['new_foo_value']}
     merge_dicts(target, incoming, append_lists=True)
     self.assertEqual(target, {'Foo': ['old_foo_value', 'new_foo_value']})
コード例 #43
0
ファイル: test_utils.py プロジェクト: zined/botocore
 def test_list_values_mismatching_types(self):
     """A non-list destination value is replaced, not appended to."""
     target = {'Foo': 'old_foo_value'}
     incoming = {'Foo': ['new_foo_value']}
     merge_dicts(target, incoming, append_lists=True)
     self.assertEqual(target, {'Foo': ['new_foo_value']})
コード例 #44
0
ファイル: collection.py プロジェクト: thedrow/boto3
    def pages(self):
        """
        A generator which yields pages of resource instances after
        doing the appropriate service operation calls and handling
        any pagination on your behalf. Non-paginated calls will
        return a single page of items.

        Page size, item limit, and filter parameters are applied
        if they have previously been set.

            >>> bucket = s3.Bucket('boto3')
            >>> for page in bucket.objects.pages():
            ...     for obj in page:
            ...         print(obj.key)
            'key1'
            'key2'

        :rtype: list(:py:class:`~boto3.resources.base.ServiceResource`)
        :return: List of resource instances
        """
        client = self._parent.meta.client
        # 'limit' and 'page_size' control pagination client-side; strip
        # them so they are not sent to the service as request parameters.
        cleaned_params = self._params.copy()
        limit = cleaned_params.pop("limit", None)
        page_size = cleaned_params.pop("page_size", None)
        params = create_request_parameters(self._parent, self._model.request)
        merge_dicts(params, cleaned_params, append_lists=True)

        # Is this a paginated operation? If so, we need to get an
        # iterator for the various pages. If not, then we simply
        # call the operation and return the result as a single
        # page in a list. For non-paginated results, we just ignore
        # the page size parameter.
        if client.can_paginate(self._py_operation_name):
            logger.info(
                "Calling paginated %s:%s with %r", self._parent.meta.service_name, self._py_operation_name, params
            )
            paginator = client.get_paginator(self._py_operation_name)
            pages = paginator.paginate(PaginationConfig={"MaxItems": limit, "PageSize": page_size}, **params)
        else:
            logger.info("Calling %s:%s with %r", self._parent.meta.service_name, self._py_operation_name, params)
            pages = [getattr(client, self._py_operation_name)(**params)]

        # Now that we have a page iterator or single page of results
        # we start processing and yielding individual items.
        count = 0
        for page in pages:
            page_items = []
            for item in self._handler(self._parent, params, page):
                page_items.append(item)

                # If the limit is set and has been reached, then
                # we stop processing items here.
                count += 1
                if limit is not None and count >= limit:
                    break

            # Yield the (possibly truncated) page of resource instances.
            yield page_items

            # Stop reading pages once we've reached our limit.
            if limit is not None and count >= limit:
                break
コード例 #45
0
ファイル: test_utils.py プロジェクト: zined/botocore
 def test_list_values_missing_key(self):
     """A missing key is created with the source's list value."""
     target = {}
     incoming = {'Foo': ['foo_value']}
     merge_dicts(target, incoming, append_lists=True)
     self.assertEqual(target, {'Foo': ['foo_value']})