def _fetch_router_info(self, router_ids=None, device_ids=None,
                       all_routers=False):
    """Fetch router dicts from the routing plugin.

    :param router_ids: List of router_ids of routers to fetch
    :param device_ids: List of device_ids whose routers to fetch
    :param all_routers: If True fetch all the routers for this agent.
    :return: List of router dicts of format:
             [ {router_dict1}, {router_dict2}, ...]
    """
    try:
        if all_routers:
            router_ids = self.plugin_rpc.get_router_ids(self.context)
            return self._fetch_router_chunk_data(router_ids)
        if router_ids:
            return self._fetch_router_chunk_data(router_ids)
        if device_ids:
            return self.plugin_rpc.get_routers(self.context,
                                               hd_ids=device_ids)
    except oslo_messaging.MessagingTimeout:
        if self.sync_routers_chunk_size > SYNC_ROUTERS_MIN_CHUNK_SIZE:
            # back off: halve the chunk size, but never below the minimum
            self.sync_routers_chunk_size = max(
                self.sync_routers_chunk_size // 2,
                SYNC_ROUTERS_MIN_CHUNK_SIZE)
            LOG.error(_LE('Server failed to return info for routers in '
                          'required time, decreasing chunk size to: %s'),
                      self.sync_routers_chunk_size)
        else:
            LOG.error(_LE('Server failed to return info for routers in '
                          'required time even with min chunk size: %s. '
                          'It might be under very high load or '
                          'just inoperable'),
                      self.sync_routers_chunk_size)
        raise
    except oslo_messaging.MessagingException:
        LOG.exception(_LE("RPC Error in fetching routers from plugin"))
        raise n_exc.AbortSyncRouters()

    self.fullsync = True
    LOG.debug("Periodic_sync_routers_task successfully completed")
    # adjust chunk size after successful sync
    if self.sync_routers_chunk_size < SYNC_ROUTERS_MAX_CHUNK_SIZE:
        self.sync_routers_chunk_size = min(
            self.sync_routers_chunk_size + SYNC_ROUTERS_MIN_CHUNK_SIZE,
            SYNC_ROUTERS_MAX_CHUNK_SIZE)
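The helper `_fetch_router_chunk_data` called above is not part of this snippet. A minimal hypothetical sketch of what it could look like, assuming it only slices `router_ids` by `self.sync_routers_chunk_size` and lets `oslo_messaging.MessagingTimeout` propagate back to `_fetch_router_info`:

# Hypothetical sketch; the real helper is not shown in this snippet.
def _fetch_router_chunk_data(self, router_ids):
    """Fetch routers in slices of self.sync_routers_chunk_size."""
    routers = []
    for i in range(0, len(router_ids), self.sync_routers_chunk_size):
        # A MessagingTimeout raised here propagates to the caller,
        # which shrinks the chunk size and re-raises.
        routers.extend(self.plugin_rpc.get_routers(
            self.context,
            router_ids[i:i + self.sync_routers_chunk_size]))
    return routers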
def fetch_and_sync_all_routers(self, context, ns_manager):
    prev_router_ids = set(self.router_info)
    timestamp = timeutils.utcnow()
    try:
        if self.conf.use_namespaces:
            routers = self.plugin_rpc.get_routers(context)
        else:
            routers = self.plugin_rpc.get_routers(context,
                                                  [self.conf.router_id])
    except oslo_messaging.MessagingException:
        LOG.exception(_LE("Failed synchronizing routers due to RPC error"))
        raise n_exc.AbortSyncRouters()
    else:
        LOG.debug('Processing :%r', routers)
        for r in routers:
            ns_manager.keep_router(r['id'])
            if r.get('distributed'):
                # need to keep fip namespaces as well
                ext_net_id = (r['external_gateway_info'] or {}).get(
                    'network_id')
                if ext_net_id:
                    ns_manager.keep_ext_net(ext_net_id)
            update = queue.RouterUpdate(r['id'],
                                        queue.PRIORITY_SYNC_ROUTERS_TASK,
                                        router=r,
                                        timestamp=timestamp)
            self._queue.add(update)
        self.fullsync = False
        LOG.debug("periodic_sync_routers_task successfully completed")

        curr_router_ids = set([r['id'] for r in routers])

        # Delete routers that have disappeared since the last sync
        for router_id in prev_router_ids - curr_router_ids:
            ns_manager.keep_router(router_id)
            update = queue.RouterUpdate(router_id,
                                        queue.PRIORITY_SYNC_ROUTERS_TASK,
                                        timestamp=timestamp,
                                        action=queue.DELETE_ROUTER)
            self._queue.add(update)
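The stale-router cleanup at the end relies on a plain set difference between the routers known before the sync and those the server just returned. A tiny illustration of that check, with made-up router ids:

# Illustration only; 'r1'..'r3' are made-up router ids.
prev_router_ids = {'r1', 'r2', 'r3'}        # routers known before the sync
curr_router_ids = {'r2', 'r3'}              # routers returned by this sync
stale = prev_router_ids - curr_router_ids   # {'r1'} -> queued as DELETE_ROUTER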
def fetch_and_sync_all_routers(self, context, ns_manager):
    prev_router_ids = set(self.router_info)
    curr_router_ids = set()
    timestamp = timeutils.utcnow()
    router_ids = []
    chunk = []
    is_snat_agent = (self.conf.agent_mode ==
                     lib_const.L3_AGENT_MODE_DVR_SNAT)
    try:
        router_ids = self.plugin_rpc.get_router_ids(context)
        # We set HA network port status to DOWN to let l2 agent update it
        # to ACTIVE after wiring. This allows us to spawn keepalived only
        # when l2 agent finished wiring the port.
        self.plugin_rpc.update_all_ha_network_port_statuses(context)
        # fetch routers by chunks to reduce the load on server and to
        # start router processing earlier
        for i in range(0, len(router_ids), self.sync_routers_chunk_size):
            chunk = router_ids[i:i + self.sync_routers_chunk_size]
            routers = self.plugin_rpc.get_routers(context, chunk)
            LOG.debug('Processing :%r', routers)
            for r in routers:
                curr_router_ids.add(r['id'])
                ns_manager.keep_router(r['id'])
                if r.get('distributed'):
                    # need to keep fip namespaces as well
                    ext_net_id = (r['external_gateway_info'] or {}).get(
                        'network_id')
                    if ext_net_id:
                        ns_manager.keep_ext_net(ext_net_id)
                elif is_snat_agent and not r.get('ha'):
                    ns_manager.ensure_snat_cleanup(r['id'])
                update = queue.RouterUpdate(
                    r['id'],
                    queue.PRIORITY_SYNC_ROUTERS_TASK,
                    router=r,
                    timestamp=timestamp)
                self._queue.add(update)
    except oslo_messaging.MessagingTimeout:
        if self.sync_routers_chunk_size > SYNC_ROUTERS_MIN_CHUNK_SIZE:
            self.sync_routers_chunk_size = max(
                self.sync_routers_chunk_size // 2,
                SYNC_ROUTERS_MIN_CHUNK_SIZE)
            LOG.error('Server failed to return info for routers in '
                      'required time, decreasing chunk size to: %s',
                      self.sync_routers_chunk_size)
        else:
            LOG.error('Server failed to return info for routers in '
                      'required time even with min chunk size: %s. '
                      'It might be under very high load or '
                      'just inoperable',
                      self.sync_routers_chunk_size)
        raise
    except oslo_messaging.MessagingException:
        failed_routers = chunk or router_ids
        LOG.exception("Failed synchronizing routers '%s' "
                      "due to RPC error", failed_routers)
        raise n_exc.AbortSyncRouters()

    self.fullsync = False
    LOG.debug("periodic_sync_routers_task successfully completed")
    # adjust chunk size after successful sync
    if self.sync_routers_chunk_size < SYNC_ROUTERS_MAX_CHUNK_SIZE:
        self.sync_routers_chunk_size = min(
            self.sync_routers_chunk_size + SYNC_ROUTERS_MIN_CHUNK_SIZE,
            SYNC_ROUTERS_MAX_CHUNK_SIZE)

    # Delete routers that have disappeared since the last sync
    for router_id in prev_router_ids - curr_router_ids:
        ns_manager.keep_router(router_id)
        update = queue.RouterUpdate(router_id,
                                    queue.PRIORITY_SYNC_ROUTERS_TASK,
                                    timestamp=timestamp,
                                    action=queue.DELETE_ROUTER)
        self._queue.add(update)
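The adaptive chunk sizing above halves the chunk on an RPC timeout and grows it by one minimum-sized step after each successful sync, clamped between the two constants. A small standalone sketch of that policy; the constant values are illustrative and not taken from this snippet:

# Standalone sketch of the chunk-size back-off/ramp-up policy above.
# Constant values here are illustrative only.
SYNC_ROUTERS_MIN_CHUNK_SIZE = 32
SYNC_ROUTERS_MAX_CHUNK_SIZE = 256


def shrink_on_timeout(chunk_size):
    # Halve the chunk size after a MessagingTimeout, never below the minimum.
    return max(chunk_size // 2, SYNC_ROUTERS_MIN_CHUNK_SIZE)


def grow_after_success(chunk_size):
    # Grow by one minimum-sized step after a successful sync, capped at the
    # maximum.
    return min(chunk_size + SYNC_ROUTERS_MIN_CHUNK_SIZE,
               SYNC_ROUTERS_MAX_CHUNK_SIZE)


# Example: two timeouts followed by one successful sync.
size = SYNC_ROUTERS_MAX_CHUNK_SIZE   # 256
size = shrink_on_timeout(size)       # 128
size = shrink_on_timeout(size)       # 64
size = grow_after_success(size)      # 96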
def fetch_and_sync_all_routers(self, context, ns_manager):
    prev_router_ids = set(self.router_info)
    curr_router_ids = set()
    timestamp = timeutils.utcnow()
    try:
        router_ids = ([self.conf.router_id] if self.conf.router_id else
                      self.plugin_rpc.get_router_ids(context))
        # fetch routers by chunks to reduce the load on server and to
        # start router processing earlier
        for i in range(0, len(router_ids), self.sync_routers_chunk_size):
            routers = self.plugin_rpc.get_routers(
                context, router_ids[i:i + self.sync_routers_chunk_size])
            LOG.debug('Processing :%r', routers)
            for r in routers:
                curr_router_ids.add(r['id'])
                ns_manager.keep_router(r['id'])
                if r.get('distributed'):
                    # need to keep fip namespaces as well
                    ext_net_id = (r['external_gateway_info'] or {}).get(
                        'network_id')
                    if ext_net_id:
                        ns_manager.keep_ext_net(ext_net_id)
                update = queue.RouterUpdate(
                    r['id'],
                    queue.PRIORITY_SYNC_ROUTERS_TASK,
                    router=r,
                    timestamp=timestamp)
                self._queue.add(update)
    except oslo_messaging.MessagingTimeout:
        if self.sync_routers_chunk_size > SYNC_ROUTERS_MIN_CHUNK_SIZE:
            self.sync_routers_chunk_size = max(
                self.sync_routers_chunk_size // 2,
                SYNC_ROUTERS_MIN_CHUNK_SIZE)
            LOG.error(_LE('Server failed to return info for routers in '
                          'required time, decreasing chunk size to: %s'),
                      self.sync_routers_chunk_size)
        else:
            LOG.error(_LE('Server failed to return info for routers in '
                          'required time even with min chunk size: %s. '
                          'It might be under very high load or '
                          'just inoperable'),
                      self.sync_routers_chunk_size)
        raise
    except oslo_messaging.MessagingException:
        LOG.exception(_LE("Failed synchronizing routers due to RPC error"))
        raise n_exc.AbortSyncRouters()

    self.fullsync = False
    LOG.debug("periodic_sync_routers_task successfully completed")
    # adjust chunk size after successful sync
    if self.sync_routers_chunk_size < SYNC_ROUTERS_MAX_CHUNK_SIZE:
        self.sync_routers_chunk_size = min(
            self.sync_routers_chunk_size + SYNC_ROUTERS_MIN_CHUNK_SIZE,
            SYNC_ROUTERS_MAX_CHUNK_SIZE)

    # Delete routers that have disappeared since the last sync
    for router_id in prev_router_ids - curr_router_ids:
        ns_manager.keep_router(router_id)
        update = queue.RouterUpdate(router_id,
                                    queue.PRIORITY_SYNC_ROUTERS_TASK,
                                    timestamp=timestamp,
                                    action=queue.DELETE_ROUTER)
        self._queue.add(update)