Example #1
0
File: sync.py  Project: penesoft/neutron
 def _fetch_data(self, uri, cursor, page_size):
     """Fetch a page of synchronization data from the NVP backend.

     :param uri: NVP resource URI to query.
     :param cursor: opaque paging cursor. The sentinel value "start"
         means begin from the first page; a falsy cursor means there is
         nothing left to retrieve.
     :param page_size: number of items requested. May exceed the NVP
         per-request maximum (MAX_PAGE_SIZE), in which case the fetch is
         split into several round-trips.
     :returns: tuple ``(results, cursor, total_size)`` where
         ``total_size`` is only meaningful on the first request of a
         paged sequence (subsequent pages do not report it).
     """
     # If not cursor there is nothing to retrieve
     if not cursor:
         return [], cursor, None
     if cursor == "start":
         cursor = None
     # Chunk size tuning might, in some conditions, make it larger
     # than 5,000, which is the maximum page size allowed by the NVP
     # API. In this case the request should be split in multiple
     # requests. This is not ideal, and therefore a log warning will
     # be emitted.
     # NOTE: floor division is required here; true division ("/")
     # yields a float under Python 3 and breaks range() below.
     num_requests = page_size // (MAX_PAGE_SIZE + 1) + 1
     if num_requests > 1:
         LOG.warning(
             _(
                 "Requested page size is %(cur_chunk_size)d. "
                 "It might be necessary to do %(num_requests)d "
                 "round-trips to NVP for fetching data. Please "
                 "tune sync parameters to ensure chunk size "
                 "is less than %(max_page_size)d"
             ),
             {"cur_chunk_size": page_size, "num_requests": num_requests, "max_page_size": MAX_PAGE_SIZE},
         )
     # Only the first request might return the total size,
     # subsequent requests will definitely not
     results, cursor, total_size = nsxlib.get_single_query_page(
         uri, self._cluster, cursor, min(page_size, MAX_PAGE_SIZE)
     )
     for _req in range(num_requests - 1):
         # If no cursor is returned break the cycle as there is no
         # actual need to perform multiple requests (all fetched)
         # This happens when the overall size of resources exceeds
         # the maximum page size, but the number for each single
         # resource type is below this threshold
         if not cursor:
             break
         req_results, cursor = nsxlib.get_single_query_page(
             uri, self._cluster, cursor, min(page_size, MAX_PAGE_SIZE)
         )[:2]
         results.extend(req_results)
     # reset cursor before returning if we queried just to
     # know the number of entities
     return results, cursor if page_size else "start", total_size
Example #2
0
 def _fetch_data(self, uri, cursor, page_size):
     """Fetch a page of synchronization data from the NSX backend.

     :param uri: NSX resource URI to query.
     :param cursor: opaque paging cursor. The sentinel value 'start'
         means begin from the first page; a falsy cursor means there is
         nothing left to retrieve.
     :param page_size: number of items requested. May exceed the NSX
         per-request maximum (MAX_PAGE_SIZE), in which case the fetch is
         split into several round-trips.
     :returns: tuple ``(results, cursor, total_size)`` where
         ``total_size`` is only meaningful on the first request of a
         paged sequence (subsequent pages do not report it).
     """
     # If not cursor there is nothing to retrieve
     if not cursor:
         return [], cursor, None
     if cursor == 'start':
         cursor = None
     # Chunk size tuning might, in some conditions, make it larger
     # than 5,000, which is the maximum page size allowed by the NSX
     # API. In this case the request should be split in multiple
     # requests. This is not ideal, and therefore a log warning will
     # be emitted.
     # NOTE: floor division is required here; true division ('/')
     # yields a float under Python 3 and breaks range() below.
     num_requests = page_size // (MAX_PAGE_SIZE + 1) + 1
     if num_requests > 1:
         LOG.warning(
             _("Requested page size is %(cur_chunk_size)d. "
               "It might be necessary to do %(num_requests)d "
               "round-trips to NSX for fetching data. Please "
               "tune sync parameters to ensure chunk size "
               "is less than %(max_page_size)d"), {
                   'cur_chunk_size': page_size,
                   'num_requests': num_requests,
                   'max_page_size': MAX_PAGE_SIZE
               })
     # Only the first request might return the total size,
     # subsequent requests will definitely not
     results, cursor, total_size = nsxlib.get_single_query_page(
         uri, self._cluster, cursor, min(page_size, MAX_PAGE_SIZE))
     for _req in range(num_requests - 1):
         # If no cursor is returned break the cycle as there is no
         # actual need to perform multiple requests (all fetched)
         # This happens when the overall size of resources exceeds
         # the maximum page size, but the number for each single
         # resource type is below this threshold
         if not cursor:
             break
         req_results, cursor = nsxlib.get_single_query_page(
             uri, self._cluster, cursor, min(page_size,
                                             MAX_PAGE_SIZE))[:2]
         results.extend(req_results)
     # reset cursor before returning if we queried just to
     # know the number of entities
     return results, cursor if page_size else 'start', total_size