class Notifier:
    def __init__(self, logger: Logger):
        self._logger = logger
        self._q = LightQueue(maxsize=1000)
        # ensure no one is writing to the same broker connection concurrently
        self._sio = storage.utils.SIOManager.create(write_only=True)

    def _process(self, ar: AttackResult):
        flag_data = ar.get_flag_notification()
        self._logger.debug('Sending notification with %s', flag_data)
        self._sio.emit(
            event='flag_stolen',
            data={'data': flag_data},
            namespace='/live_events',
        )

    def add(self, ar: AttackResult) -> bool:
        try:
            self._q.put_nowait(ar)
            return True
        except Full:
            return False

    def __call__(self) -> None:
        while True:
            try:
                ar = self._q.get(block=True, timeout=3)
            except Empty:
                eventlet.sleep(0.5)
            else:
                self._process(ar)
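# Usage sketch (not from the original source): run the notifier loop in a
# background green thread and enqueue attack results as they arrive; `logger`
# and `attack_result` are assumed to be provided by the host application.
notifier = Notifier(logger)
eventlet.spawn_n(notifier)  # __call__ drains the queue forever
if not notifier.add(attack_result):
    logger.warning('notification queue full, dropping event')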
class ProducerPool(object):
    Receipt = Receipt

    def __init__(self, size=20):
        self.size = size
        self.inqueue = LightQueue()
        self._running = None
        self._producers = None

    def apply_async(self, task, args, kwargs, callback=None, **options):
        if self._running is None:
            self._running = spawn_n(self._run)
        receipt = self.Receipt(callback)
        self.inqueue.put((task, args, kwargs, options, receipt))
        return receipt

    def _run(self):
        self._producers = [spawn_n(self._producer)
                           for _ in xrange(self.size)]

    def _producer(self):
        connection = current_app.broker_connection()
        publisher = current_app.amqp.TaskPublisher(connection)
        inqueue = self.inqueue

        while 1:
            task, args, kwargs, options, receipt = inqueue.get()
            result = task.apply_async(args, kwargs,
                                      publisher=publisher,
                                      **options)
            receipt.finished(result)
class ProducerPool(object): """Usage:: >>> app = Celery(broker='amqp://') >>> ProducerPool(app) """ Receipt = Receipt def __init__(self, app, size=20): self.app = app self.size = size self.inqueue = LightQueue() self._running = None self._producers = None def apply_async(self, task, args, kwargs, callback=None, **options): if self._running is None: self._running = spawn_n(self._run) receipt = self.Receipt(callback) self.inqueue.put((task, args, kwargs, options, receipt)) return receipt def _run(self): self._producers = [spawn_n(self._producer) for _ in range(self.size)] def _producer(self): inqueue = self.inqueue with self.app.producer_or_acquire() as producer: while 1: task, args, kwargs, options, receipt = inqueue.get() result = task.apply_async( args, kwargs, producer=producer, **options) receipt.finished(result)
class ProducerPool(object):
    Receipt = Receipt

    def __init__(self, size=20):
        self.size = size
        self.inqueue = LightQueue()
        self._running = None
        self._producers = None

    def apply_async(self, task, args, kwargs, callback=None, **options):
        if self._running is None:
            self._running = spawn_n(self._run)
        receipt = self.Receipt(callback)
        self.inqueue.put((task, args, kwargs, options, receipt))
        return receipt

    def _run(self):
        self._producers = [spawn_n(self._producer)
                           for _ in xrange(self.size)]

    def _producer(self):
        connection = current_app.broker_connection()
        publisher = current_app.amqp.TaskProducer(connection)
        inqueue = self.inqueue

        while 1:
            task, args, kwargs, options, receipt = inqueue.get()
            result = task.apply_async(args, kwargs,
                                      publisher=publisher,
                                      **options)
            receipt.finished(result)
def get(self, block=True, timeout=None):
    event = LightQueue(1)
    if flask.has_request_context():
        emit('{}#get'.format(self._uuid),
             callback=lambda x: event.put(x))
    else:
        sio = flask.current_app.extensions['socketio']
        sio.emit('{}#get'.format(self._uuid),
                 callback=lambda x: event.put(x))
    # wait for the client's acknowledgement, honoring the caller's
    # block/timeout arguments
    return event.get(block=block, timeout=timeout)
def __init__(self, name, socket=None):
    """Takes a string and maybe a socket.

    If given a socket, we will try to play nice with its loop.
    """
    LightQueue.__init__(self)
    self._socket = socket
    self._name = name
def get(self, timeout=10):
    name = getter.__name__
    signal = '{uuid}#{event}'.format(
        uuid=self._uuid,  # pylint: disable=protected-access
        event=name
    )
    event = LightQueue(1)
    if flask.has_request_context():
        emit(signal, callback=lambda x: event.put(unpack(x)))
    else:
        sio = flask.current_app.extensions['socketio']
        sio.emit(signal, callback=lambda x: event.put(unpack(x)))
    data = event.get(timeout=timeout)
    return getter(self, data)
def get(self, timeout=10):
    # pylint: disable=missing-docstring
    name = getter.__name__
    signal = '{uuid}{sep}{event}'.format(
        uuid=self._uuid,  # pylint: disable=protected-access
        sep=SEPARATOR,
        event=name
    )
    event = LightQueue(1)
    if flask.has_request_context():
        emit(signal, callback=lambda x: event.put(unpack(x)))
    else:
        sio = flask.current_app.extensions['socketio']
        sio.emit(signal, callback=lambda x: event.put(unpack(x)))
    data = event.get(timeout=timeout)
    return getter(self, data)
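# Pattern note (added): the `get` variants above share one technique -- emit a
# Socket.IO event and block on a one-slot LightQueue until the client's
# acknowledgement callback fills it, turning an async round trip into a
# synchronous call with a timeout. `flask.has_request_context()` decides
# whether to use the request-bound `emit` or the server-wide `sio.emit`.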
def __init__(self) -> None:
    # Tanner (6/15/21): black does not treat all these type definitions and comments kindly, so turning off formatting
    # fmt: off
    self._instrument_communication_error_queue: Queue[Tuple[Exception, str]] = Queue()  # pylint: disable=unsubscriptable-object # https://github.com/PyCQA/pylint/issues/1498
    self._instrument_comm_board_queues: Tuple[
        Tuple[Queue[Dict[str, Any]], Queue[Dict[str, Any]], Queue[Any]],
        ...,  # pylint: disable=unsubscriptable-object # https://github.com/PyCQA/pylint/issues/1498
    ] = tuple((Queue(), Queue(), Queue()) for _ in range(1))
    self._from_main_to_file_writer_queue: Queue[Dict[str, Any]] = Queue()  # pylint: disable=unsubscriptable-object # https://github.com/PyCQA/pylint/issues/1498
    self._from_file_writer_to_main_queue: Queue[Dict[str, Any]] = Queue()  # pylint: disable=unsubscriptable-object # https://github.com/PyCQA/pylint/issues/1498
    self._file_writer_error_queue: Queue[Tuple[Exception, str]] = Queue()  # pylint: disable=unsubscriptable-object # https://github.com/PyCQA/pylint/issues/1498
    self._file_writer_board_queues: Tuple[
        Tuple[Queue[Any], Queue[Any]],
        ...  # pylint: disable=unsubscriptable-object # https://github.com/PyCQA/pylint/issues/1498
    ] = tuple((self._instrument_comm_board_queues[i][2], Queue()) for i in range(1))
    self._data_analyzer_board_queues: Tuple[
        Tuple[Queue[Any], Queue[Any]],
        ...  # pylint: disable=unsubscriptable-object # https://github.com/PyCQA/pylint/issues/1498 # noqa: E231 # flake8 doesn't understand the 3 dots for type definition
    ] = tuple((self._file_writer_board_queues[i][1], Queue()) for i in range(1))
    self._from_main_to_data_analyzer_queue: Queue[Dict[str, Any]] = Queue()  # pylint: disable=unsubscriptable-object # https://github.com/PyCQA/pylint/issues/1498
    self._from_data_analyzer_to_main_queue: Queue[Dict[str, Any]] = Queue()  # pylint: disable=unsubscriptable-object # https://github.com/PyCQA/pylint/issues/1498
    self._data_analyzer_error_queue: Queue[Tuple[Exception, str]] = Queue()  # pylint: disable=unsubscriptable-object # https://github.com/PyCQA/pylint/issues/1498
    self._from_server_to_main_queue: queue.Queue[Dict[str, Any]] = queue.Queue()  # pylint: disable=unsubscriptable-object # https://github.com/PyCQA/pylint/issues/1498
    self._data_to_server_queue: LightQueue = LightQueue()
    # fmt: on
class LocalMailbox(Mailbox):
    __slots__ = ['_queue']

    def __init__(self):
        self._queue = LightQueue()

    def put(self, message):
        self._queue.put(message)

    def get(self):
        return self._queue.get(block=True)

    def encode(self):
        raise NotImplementedError

    @staticmethod
    def decode(params):
        raise NotImplementedError
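# Usage sketch (not from the original source): pass messages between green
# threads through the mailbox; `get` blocks until a message is available.
import eventlet

mailbox = LocalMailbox()
eventlet.spawn_n(mailbox.put, 'hello')
print(mailbox.get())  # -> 'hello'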
def fail_fast_imap(pool, call, items):
    """ Run a function against each item in a given list, yielding each
    function result in turn, where the function call is handled in a
    :class:`~eventlet.greenthread.GreenThread` spawned by the provided pool.

    If any function raises an exception, all other ongoing threads are killed,
    and the exception is raised to the caller.

    This function is similar to :meth:`~eventlet.greenpool.GreenPool.imap`.

    :param pool: Pool to spawn function threads from
    :type pool: eventlet.greenpool.GreenPool
    :param call: Function call to make, expecting to receive an item from the
        given list
    """
    result_queue = LightQueue(maxsize=len(items))
    spawned_threads = set()

    def handle_result(finished_thread):
        try:
            thread_result = finished_thread.wait()
            spawned_threads.remove(finished_thread)
            result_queue.put((thread_result, None))
        except Exception:
            spawned_threads.remove(finished_thread)
            result_queue.put((None, sys.exc_info()))

    for item in items:
        gt = pool.spawn(call, item)
        spawned_threads.add(gt)
        gt.link(handle_result)

    while spawned_threads:
        result, exc_info = result_queue.get()
        if exc_info is not None:
            # Kill all other ongoing threads
            for ongoing_thread in spawned_threads:
                ongoing_thread.kill()
            # simply raising here (even raising a full exc_info) isn't
            # sufficient to preserve the original stack trace.
            # greenlet.throw() achieves this.
            eventlet.getcurrent().throw(*exc_info)
        yield result
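# Usage sketch (not from the original source): results are yielded as the
# green threads finish; any exception raised by `call` kills the remaining
# threads and propagates with its original traceback.
from eventlet.greenpool import GreenPool

pool = GreenPool(10)
for result in fail_fast_imap(pool, lambda item: item * 2, [1, 2, 3]):
    print(result)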
def main(myid, queue, concurrency, delay=5.0, duration=DURATION):
    counter = 0
    created = list()
    results = LightQueue(concurrency * 10)
    pool = GreenPool(concurrency)
    api = AccountClient({'namespace': NS}, pool_maxsize=concurrency + 1)
    now = start = checkpoint = time.time()
    pool.starmap(create_loop,
                 [(api, 'buck-%d-%d' % (myid, n), results)
                  for n in range(concurrency)])
    while now - start < duration:
        try:
            res = results.get(timeout=delay)
            created.append(res)
            counter += 1
        except Empty:
            pass
        if now - checkpoint > delay:
            print("Proc %d: %d updates in %fs, %f updates per second." % (
                myid, counter, now - checkpoint,
                counter / (now - checkpoint)))
            counter = 0
            checkpoint = now
        now = time.time()
    for coro in pool.coroutines_running:
        coro.kill()
    while not results.empty():
        created.append(results.get(block=False))
    end = time.time()
    rate = len(created) / (end - start)
    print("Proc %d: end. %d updates in %fs, %f updates per second." % (
        myid, len(created), end - start, rate))
    time.sleep(2)
    print("Proc %d: cleaning..." % myid)
    del_req = {'dtime': time.time()}
    # Do not delete twice (or an exception is raised)
    uniq_ct = set(created)
    for _ in pool.starmap(api.container_update,
                          [(ACCOUNT, n, del_req) for n in uniq_ct]):
        pass
    pool.waitall()
    queue.put(rate)
    return 0
def fail_fast_imap(pool, call, items):
    """ Run a function against each item in a given list, yielding each
    function result in turn, where the function call is handled in a
    :class:`~eventlet.greenthread.GreenThread` spawned by the provided pool.

    If any function raises an exception, all other ongoing threads are killed,
    and the exception is raised to the caller.

    This function is similar to :meth:`~eventlet.greenpool.GreenPool.imap`.

    :param pool: Pool to spawn function threads from
    :type pool: eventlet.greenpool.GreenPool
    :param call: Function call to make, expecting to receive an item from the
        given list
    """
    result_queue = LightQueue(maxsize=len(items))
    spawned_threads = set()

    def handle_result(finished_thread):
        try:
            thread_result = finished_thread.wait()
            spawned_threads.remove(finished_thread)
            result_queue.put((thread_result, None))
        except Exception as e:
            spawned_threads.remove(finished_thread)
            result_queue.put((None, e))

    for item in items:
        gt = pool.spawn(call, item)
        spawned_threads.add(gt)
        gt.link(handle_result)

    while spawned_threads:
        result, raised_exc = result_queue.get()
        if raised_exc is not None:
            # Kill all other ongoing threads
            for ongoing_thread in spawned_threads:
                ongoing_thread.kill()
            raise raised_exc
        yield result
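# Note (added): unlike the sys.exc_info() variant earlier, this version stores
# the exception object and re-raises it directly; on Python 3 the original
# traceback travels on the exception's __traceback__ attribute, so the
# explicit greenlet.throw() trick is no longer needed.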
def load(key):
    """Load the value stored with the key.

    Parameters
    ----------
    key : object
        The key to lookup the value stored.

    Returns
    -------
    object
        The value if the key exists in the cache, otherwise None.

    """
    signal = 'cache_load'
    event = LightQueue(1)
    if flask.has_request_context():
        emit(signal, {'data': pack(key)}, callback=event.put)
    else:
        sio = flask.current_app.extensions['socketio']
        sio.emit(signal, {'data': pack(key)}, callback=event.put)
    return msgpack.unpackb(bytes(event.get(timeout=10)), encoding='utf8')
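# Usage sketch (assumption): `load` must run where a Socket.IO round trip is
# possible (inside a request or with the app's socketio extension set up); it
# returns None when the client-side cache has no entry for the key.
settings = load('user_settings')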
class ProducerPool(object): """Usage:: >>> app = Celery(broker='amqp://') >>> ProducerPool(app) """ Receipt = Receipt def __init__(self, app, size=20): self.app = app self.size = size self.inqueue = LightQueue() self._running = None self._producers = None def apply_async(self, task, args, kwargs, callback=None, **options): if self._running is None: self._running = spawn_n(self._run) receipt = self.Receipt(callback) self.inqueue.put((task, args, kwargs, options, receipt)) return receipt def _run(self): self._producers = [ spawn_n(self._producer) for _ in range(self.size) ] def _producer(self): inqueue = self.inqueue with self.app.producer_or_acquire() as producer: while 1: task, args, kwargs, options, receipt = inqueue.get() result = task.apply_async(args, kwargs, producer=producer, **options) receipt.finished(result)
def __getitem__(self, key):
    """Load the value stored with the key.

    Parameters
    ----------
    key : str
        The key to lookup the value stored.

    Returns
    -------
    object
        The value if the key exists in the cache, otherwise None.

    """
    validate(key)
    signal = 'cache_load'
    event = LightQueue(1)
    if flask.has_request_context():
        emit(signal, {'data': pack(key)}, callback=event.put)
    else:
        sio = flask.current_app.extensions['socketio']
        sio.emit(signal, {'data': pack(key)}, callback=event.put)
    return msgpack.unpackb(bytes(event.get(timeout=10)), encoding='utf8')
class EventletConnectionPool(ConnectionPool):
    def __init__(self, connection_class=Connection, max_connections=None,
                 **connection_kwargs):
        self.pid = os.getpid()
        self.connection_class = connection_class
        self.connection_kwargs = connection_kwargs
        self.max_connections = max_connections or 2 ** 31
        self._created_connections = 0
        self._available_connections = LightQueue()
        self._in_use_connections = set()

    def get_connection(self, command_name, *keys, **options):
        "Get a connection from the pool"
        try:
            connection = self._available_connections.get_nowait()
        except Empty:
            if self._created_connections < self.max_connections:
                connection = self.make_connection()
            else:
                try:
                    connection = self._available_connections.get()
                except Empty:
                    raise ConnectionError("Couldn't find a free connection")
        self._in_use_connections.add(connection)
        return connection

    def release(self, connection):
        "Releases the connection back to the pool"
        self._checkpid()
        if connection.pid == self.pid:
            self._in_use_connections.remove(connection)
            self._available_connections.put_nowait(connection)

    def disconnect(self):
        "Disconnects all connections in the pool"
        while True:
            try:
                self._available_connections.get_nowait().disconnect()
            except Empty:
                break
        for connection in self._in_use_connections:
            connection.disconnect()
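# Usage sketch (not from the original source): connection kwargs are forwarded
# to the redis Connection class via make_connection (inherited from the base
# ConnectionPool, assumed); callers must release connections explicitly.
pool = EventletConnectionPool(max_connections=50,
                              host='localhost', port=6379)
connection = pool.get_connection('GET')
try:
    pass  # issue commands on the connection here
finally:
    pool.release(connection)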
def __init__(self, maxsize, transfer_size):
    LightQueue.__init__(self, maxsize)
    self.transfer_size = transfer_size
    self.transferred = 0
class Supervisor(gThread, Status):
    """The supervisor wakes up at intervals to monitor changes in the model.
    It can also be requested to perform specific operations, and these
    operations can be either async or sync.

    :keyword interval: This is the interval (in seconds as an int/float)
        between verifying all the registered instances.
    :keyword queue: Custom :class:`~Queue.Queue` instance used to send
        and receive commands.

    It is responsible for:

        * Stopping removed instances.
        * Starting new instances.
        * Restarting unresponsive/killed instances.
        * Making sure the instances consume from the queues specified
          in the model, sending ``add_consumer``/``cancel_consumer``
          broadcast commands to the instances as it finds inconsistencies.
        * Making sure the max/min concurrency setting is as specified in the
          model, sending ``autoscale`` broadcast commands to the nodes as it
          finds inconsistencies.

    The supervisor is resilient to intermittent connection failures,
    and will auto-retry any operation that is dependent on a broker.

    Since workers cannot respond to broadcast commands while the broker is
    off-line, the supervisor will not restart affected instances until the
    instance has had a chance to reconnect (decided by the
    :attr:`wait_after_broker_revived` attribute).

    """
    #: Limit instance restarts to 1/m, so out of control
    #: instances will be disabled
    restart_max_rate = '1/m'

    #: Default interval_max for ensure_connection is 30 secs.
    wait_after_broker_revived = 35.0

    #: Connection errors pause the supervisor, so events do not accumulate.
    paused = False

    #: Default interval (time in seconds as a float to reschedule).
    interval = 60.0

    def __init__(self, interval=None, queue=None, set_as_current=True):
        self.set_as_current = set_as_current
        if self.set_as_current:
            set_current(self)
        self._orig_queue_arg = queue
        self.interval = interval or self.interval
        self.queue = LightQueue() if queue is None else queue
        self._pause_mutex = Lock()
        self._last_update = None
        gThread.__init__(self)
        Status.__init__(self)

    def __copy__(self):
        return self.__class__(self.interval, self._orig_queue_arg)

    def pause(self):
        """Pause all timers."""
        self.respond_to_ping()
        with self._pause_mutex:
            if not self.paused:
                self.debug('pausing')
                self.paused = True

    def resume(self):
        """Resume all timers."""
        with self._pause_mutex:
            if self.paused:
                self.debug('resuming')
                self.paused = False

    def verify(self, instances, ratelimit=False):
        """Verify the consistency of one or more instances.

        :param instances: List of instances to verify.

        This operation is asynchronous, and returns a :class:`Greenlet`
        instance that can be used to wait for the operation to complete.

        """
        return self._request(instances, self._do_verify_instance,
                             {'ratelimit': ratelimit})

    def restart(self, instances):
        """Restart one or more instances.

        :param instances: List of instances to restart.

        This operation is asynchronous, and returns a :class:`Greenlet`
        instance that can be used to wait for the operation to complete.

        """
        return self._request(instances, self._do_restart_instance)

    def shutdown(self, instances):
        """Shutdown one or more instances.

        :param instances: List of instances to stop.

        This operation is asynchronous, and returns a :class:`Greenlet`
        instance that can be used to wait for the operation to complete.

        .. warning::

            Note that the supervisor will automatically restart any stopped
            instances unless the corresponding :class:`Instance` model has
            been marked as disabled.

        """
        return self._request(instances, self._do_stop_instance)

    def _request(self, instances, action, kwargs={}):
        event = Event()
        self.queue.put_nowait((instances, event, action, kwargs))
        return event

    def before(self):
        self.start_periodic_timer(self.interval, self._verify_all)

    def run(self):
        queue = self.queue
        self.info('started')
        supervisor_ready.send(sender=self)
        while not self.should_stop:
            try:
                instances, event, action, kwargs = queue.get(timeout=1)
            except Empty:
                self.respond_to_ping()
                continue
            self.respond_to_ping()
            self.debug('wake-up')
            try:
                for instance in instances:
                    try:
                        action(instance, **kwargs)
                    except Exception, exc:
                        self.error('Event caused exception: %r', exc)
            finally:
                event.send(True)

    def _verify_all(self, force=False):
        if self._last_update and self._last_update.ready():
            try:
                self._last_update.wait()  # collect result
            except self.GreenletExit:
                pass
            force = True
        if not self._last_update or force:
            self._last_update = self.verify(self.all_instances(),
                                            ratelimit=True)
def get(self, request):
    """Provide a streaming interface for the event bus."""
    from eventlet.queue import LightQueue, Empty
    import eventlet

    cur_hub = eventlet.hubs.get_hub()
    request.environ['eventlet.minimum_write_chunk_size'] = 0
    to_write = LightQueue()
    stop_obj = object()

    restrict = request.args.get('restrict')
    if restrict:
        restrict = restrict.split(',')

    def thread_forward_events(event):
        """Forward events to the open request."""
        if event.event_type == EVENT_TIME_CHANGED:
            return
        if restrict and event.event_type not in restrict:
            return
        _LOGGER.debug('STREAM %s FORWARDING %s', id(stop_obj), event)

        if event.event_type == EVENT_BLUMATE_STOP:
            data = stop_obj
        else:
            data = json.dumps(event, cls=rem.JSONEncoder)

        cur_hub.schedule_call_global(0, lambda: to_write.put(data))

    def stream():
        """Stream events to response."""
        self.hass.bus.listen(MATCH_ALL, thread_forward_events)
        _LOGGER.debug('STREAM %s ATTACHED', id(stop_obj))
        last_msg = time()
        # Fire off one message right away to have browsers fire open event
        to_write.put(STREAM_PING_PAYLOAD)
        while True:
            try:
                # Somehow our queue.get sometimes takes too long to be
                # notified of arrival of data. Probably because of our
                # spawning on hub in other thread hack. Because current
                # goal is to get this out, we just timeout every second
                # because it will return right away if qsize() > 0.
                # So yes, we're basically polling :(
                payload = to_write.get(timeout=1)
                if payload is stop_obj:
                    break
                msg = "data: {}\n\n".format(payload)
                _LOGGER.debug('STREAM %s WRITING %s', id(stop_obj),
                              msg.strip())
                yield msg.encode("UTF-8")
                last_msg = time()
            except Empty:
                if time() - last_msg > 50:
                    to_write.put(STREAM_PING_PAYLOAD)
            except GeneratorExit:
                _LOGGER.debug('STREAM %s RESPONSE CLOSED', id(stop_obj))
                break
        self.hass.bus.remove_listener(MATCH_ALL, thread_forward_events)

    return self.Response(stream(), mimetype='text/event-stream')
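# Usage note (added): the response is a standard Server-Sent Events stream;
# with a placeholder host and endpoint it can be consumed with, e.g.:
#   curl -N 'http://<host>/<stream-endpoint>?restrict=state_changed'
# `restrict` limits forwarding to a comma-separated list of event types.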
print "%d containers in %fs, %f containers per second." % ( counter, now - checkpoint, counter / (now - checkpoint)) counter = 0 checkpoint = now created.append(res) now = time.time() for coro in POOL.coroutines_running: coro.kill() while not RESULTS.empty(): created.append(RESULTS.get(block=False)) end = time.time() rate = len(created) / (end - start) print "End. %d containers created in %fs, %f containers per second." % ( len(created), end - start, rate) print "Cleaning..." for _ in POOL.starmap(API.container_delete, [('benchmark', n) for n in created]): pass POOL.waitall() return rate if __name__ == '__main__': import os import sys THREADS = int(sys.argv[1]) if len(sys.argv) > 1 else 1 API = ObjectStorageApi(os.getenv('OIO_NS', 'OPENIO')) RESULTS = LightQueue(THREADS * 10) POOL = GreenPool(THREADS) main(THREADS)
class VCenter(object):
    # PropertyCollector discovers changes on vms and their hardware and
    # produces (mac, switch, portKey, portGroupKey, connectable.connected,
    # connectable.status).
    # Internally, it keeps vm and key for identifying updates.
    # Subsequently, the mac has to be identified with a port.
    #
    def __init__(self, config=None, pool=None, agent=None):
        self.pool = pool
        self.agent = agent
        self.config = config or CONF.ML2_VMWARE
        self.connection = _create_session(self.config)
        self.context = agent.context
        self._monitor_process = VCenterMonitor(self, self.config,
                                               connection=self.connection,
                                               pool=self.pool)
        self.queue = Queue(None)

        self.uuid_port_map = {}
        self.mac_port_map = {}
        self.uuid_dvs_map = {}
        self.network_dvs_map = {}

        for network, dvs in six.iteritems(
                dvs_util.create_network_map_from_config(
                    self.config, connection=self.connection, pool=pool)):
            self.network_dvs_map[network] = dvs
            self.uuid_dvs_map[dvs.uuid] = dvs

    def vcenter_port_changes(self, changed):
        ports_by_mac = defaultdict(dict)

        for port_desc in changed:
            port = {
                'port_desc': port_desc,
                'port': {
                    'binding:vif_details': {
                        'dvs_port_key': port_desc.port_key,
                        'dvs_uuid': port_desc.dvs_uuid,
                    },
                    'mac_address': port_desc.mac_address,
                }
            }

            dvs = self.get_dvs_by_uuid(port_desc.dvs_uuid)
            if not dvs:
                continue

            if port_desc.status != 'deleted':
                dvs.ports_by_key[port_desc.port_key] = port
                ports_by_mac[port_desc.mac_address] = port
            else:
                dvs.ports_by_key.pop(port_desc.port_key, None)
                ports_by_mac.pop(port_desc.mac_address, None)

        # self.read_dvs_ports(ports_by_mac)  Skip that
        macs = set(six.iterkeys(ports_by_mac))

        port_list = []
        for port_id, mac, status, admin_state_up, network_id, \
                network_type, segmentation_id in self.get_ports_by_mac(macs):
            macs.discard(mac)
            port_info = ports_by_mac[mac]
            neutron_info = {
                "port_id": port_id,
                "id": port_id,
                "device": port_id,
                "mac_address": mac,
                "admin_state_up": admin_state_up,
                "status": status,
                "network_id": network_id,
                "network_type": network_type,
                "segmentation_id": segmentation_id,
            }
            port_info["port"]["id"] = port_id
            c_util.dict_merge(port_info, neutron_info)
            self.uuid_port_map[port_id] = port_info
            port_list.append(port_info)

        if macs:
            LOG.warning(
                _LW("Could not find the following macs: {}").format(macs))

        LOG.debug("Got port information from db for %d ports",
                  len(port_list))
        for port in port_list:
            self.queue.put(port)

    def start(self):
        self._monitor_process.start()

    @staticmethod
    def update_port_desc(port, port_info):
        # Validate connectionCookie, so we still have the same instance
        # behind that portKey
        port_desc = port['port_desc']
        connection_cookie = _cast(getattr(port_info, 'connectionCookie',
                                          None))

        if port_desc.connection_cookie != connection_cookie:
            LOG.error("Cookie mismatch {} {} {} <> {}".format(
                port_desc.mac_address, port_desc.port_key,
                port_desc.connection_cookie, connection_cookie))
            return False

        for k, v in six.iteritems(_DVSPortDesc.from_dvs_port(port_info)):
            setattr(port_desc, k, v)
        return True

    def ports_by_switch_and_key(self, ports):
        ports_by_switch_and_key = defaultdict(dict)
        for port in ports:
            port_desc = port['port_desc']
            dvs = self.get_dvs_by_uuid(port_desc.dvs_uuid)
            if dvs:
                ports_by_switch_and_key[dvs][port_desc.port_key] = port
        return ports_by_switch_and_key

    @c_util.stats.timed()
    def bind_ports(self, ports, callback=None):
        ports_by_switch_and_key = self.ports_by_switch_and_key(ports)

        for dvs, ports_by_key in six.iteritems(ports_by_switch_and_key):
            specs = []
            for port in six.itervalues(ports_by_key):
                if (port["network_type"] == "vlan"
                        and port["segmentation_id"] is not None) \
                        or port["network_type"] == "flat":
                    spec = builder.neutron_to_port_config_spec(port)
                    if not CONF.AGENT.dry_run:
                        specs.append(spec)
                    else:
                        LOG.debug(spec)

            dvs.queue_update_specs(specs, callback=callback)

    def get_dvs_by_uuid(self, uuid):
        return self.uuid_dvs_map.get(uuid, None)

    def get_port_by_uuid(self, uuid):
        return self.uuid_port_map.get(uuid, None)

    def fetch_ports_by_mac(self, portgroup_key=None, mac_addr=None):
        for dvs in six.itervalues(self.uuid_dvs_map):
            port_keys = dvs._dvs.FetchDVPortKeys(
                dvs._dvs, criteria=builder.port_criteria())
            ports = dvs._dvs.FetchDVPorts(
                criteria=builder.port_criteria(
                    port_group_key=portgroup_key, port_key=port_keys))

            for port in ports:
                if hasattr(port, 'state'):
                    if hasattr(port.state, 'runtimeInfo'):
                        if mac_addr == port.state.runtimeInfo.macAddress:
                            return port
                        else:
                            continue

        raise Exception('DVS port not found!')

    def get_new_ports(self, block=False, timeout=1.0, max_ports=None):
        ports_by_mac = defaultdict(dict)

        try:
            while max_ports is None or len(ports_by_mac) < max_ports:
                new_port = self.queue.get(block=block, timeout=timeout)
                port_desc = new_port['port_desc']
                block = False  # Only block on the first item
                if port_desc.status == 'deleted':
                    ports_by_mac.pop(port_desc.mac_address, None)
                    port = self.mac_port_map.pop(port_desc.mac_address, None)
                    if port:
                        port_desc = port['port_desc']
                        self.uuid_port_map.pop(port['id'], None)
                        dvs = self.get_dvs_by_uuid(port_desc.dvs_uuid)
                        dvs.ports_by_key.pop(port_desc.port_key, None)
                else:
                    port = self.mac_port_map.get(port_desc.mac_address, {})
                    port.update(dict(new_port))
                    ports_by_mac[port_desc.mac_address] = port
                    dvs = self.get_dvs_by_uuid(port_desc.dvs_uuid)
                    if dvs:
                        dvs.ports_by_key[port_desc.port_key] = port
        except Empty:
            pass
        return ports_by_mac

    def read_dvs_ports(self, ports_by_mac):
        ports_by_switch_and_key = self.ports_by_switch_and_key(
            six.itervalues(ports_by_mac))
        # This loop can get very slow, if get_port_info_by_portkey gets
        # port keys passed of instances, which are only partly connected,
        # meaning: the instance is associated, but the link is not quite
        # up yet
        for dvs, ports_by_key in six.iteritems(ports_by_switch_and_key):
            for port_info in dvs.get_port_info_by_portkey(
                    list(six.iterkeys(ports_by_key))):
                # View is not sufficient
                port = ports_by_key[port_info.key]
                if not VCenter.update_port_desc(port, port_info):
                    port_desc = port['port_desc']
                    ports_by_mac.pop(port_desc.mac_address)
        LOG.debug("Read all ports")

    @enginefacade.reader
    def get_ports_by_mac(self, mac_addresses):
        if not mac_addresses:
            return []

        if not self.context:
            self.context = neutron.context.get_admin_context()

        session = self.context.session
        with session.begin(subtransactions=True):
            return session.query(
                models_v2.Port.id,
                models_v2.Port.mac_address,
                models_v2.Port.status,
                models_v2.Port.admin_state_up,
                models_ml2.NetworkSegment.network_id,
                models_ml2.NetworkSegment.network_type,
                models_ml2.NetworkSegment.segmentation_id).\
                join(models_ml2.PortBindingLevel,
                     models_v2.Port.id ==
                     models_ml2.PortBindingLevel.port_id).\
                join(models_ml2.NetworkSegment,
                     models_ml2.PortBindingLevel.segment_id ==
                     models_ml2.NetworkSegment.id).\
                filter(
                    models_ml2.PortBindingLevel.host == self.agent.conf.host,
                    models_ml2.PortBindingLevel.driver == constants.DVS,
                    models_v2.Port.mac_address.in_(mac_addresses),
                ).all()

    def stop(self):
        self._monitor_process.stop()
        try:
            while True:
                self.queue.get_nowait()
        except Empty:
            pass