def __init__(self, spider):
    self.spider = self._spider(spider)
    self.workers = getattr(self.spider, 'workers', 100)
    self.timeout = getattr(self.spider, 'timeout', 60)
    self.start_urls = getattr(self.spider, 'start_urls', [])
    self.creq = queue.Queue()
    self.cres = queue.Queue()
    self.pool = eventlet.GreenPool(self.workers)
    self.pool.spawn_n(self.dispatcher)

def main(temp_file, namespace):
    global POOL
    namespace = None if namespace == 'None' else namespace
    _queue = queue.Queue()
    POOL.spawn(ip_lib.ip_monitor, namespace, _queue, EVENT_STOP,
               EVENT_STARTED)
    POOL.spawn(read_queue, temp_file, _queue, EVENT_STOP, EVENT_STARTED)
    POOL.waitall()

def dispatch_l3(self, d_context, args=(), kwargs={}):
    item = d_context.item
    event = d_context.event
    q_context = d_context.q_context
    chain = d_context.chain
    item_id = item["id"]
    # First round validation (Controller level)
    _validate_operation(event, item["status"], item_id)
    handlers = router_operations.handlers
    if event in handlers:
        for f in handlers[event]:
            first_run = False
            if item_id not in self.sync_items:
                self.sync_items[item_id] = queue.Queue()
                first_run = True
            self.sync_items[item_id].put(
                ctx.OperationContext(event, q_context, item, chain, f,
                                     args, kwargs))
            if first_run:
                t = greenthread.spawn(self._consume_l3, item_id,
                                      self.sync_items[item_id],
                                      self._plugin)
                if not self._async:
                    t.wait()

def __init__(self, url, user, pw, originator="os-xenapi", timeout=10,
             concurrent=5):
    """Initialize session for connection with XenServer/Xen Cloud Platform

    :param url: URL for connection to XenServer/Xen Cloud Platform
    :param user: Username for connection to XenServer/Xen Cloud Platform
    :param pw: Password for connection to XenServer/Xen Cloud Platform
    :param originator: Specify the caller for this API
    :param timeout: Timeout in seconds for XenAPI login
    :param concurrent: Maximum concurrent XenAPI connections
    """
    self.XenAPI = XenAPI
    self.originator = originator
    self.timeout = timeout
    self.concurrent = concurrent
    self._sessions = queue.Queue()
    self.host_checked = False
    self.is_slave = False
    self.ip = self._get_ip_from_url(url)
    self.url = url
    self.master_url = self._create_first_session(url, user, pw)
    self._populate_session_pool(self.master_url, user, pw)
    self.host_ref = self._get_host_ref(self.ip)
    self.host_uuid = self._get_host_uuid()
    self.product_version, self.product_brand = \
        self._get_product_version_and_brand()
    self._verify_plugin_version()
    self.platform_version = self._get_platform_version()
    self._cached_xsm_sr_relaxed = None
    apply_session_helpers(self)

def dispatch_l3(self, d_context, args=(), kwargs={}):
    item = d_context.item
    event = d_context.event
    n_context = d_context.n_context
    chain = d_context.chain
    item_id = item["id"]
    handlers = router_operations.handlers
    if event in handlers:
        for f in handlers[event]:
            first_run = False
            if item_id not in self.sync_items:
                self.sync_items[item_id] = (queue.Queue(), )
                first_run = True
            self.sync_items[item_id][0].put(
                ctx.OperationContext(event, n_context, item, chain, f,
                                     args, kwargs))
            t = None
            if first_run:
                t = greenthread.spawn(self._consume_l3, item_id,
                                      self.sync_items[item_id][0],
                                      self._plugin, self._async)
                self.sync_items[item_id] += (t, )
            if not self._async:
                t = self.sync_items[item_id][1]
                t.wait()

def dispatch_lb(self, d_context, *args, **kwargs):
    item = d_context.item
    event = d_context.event
    n_context = d_context.n_context
    chain = d_context.chain
    item_id = item["id"]
    if event in self.handlers:
        for f in self.handlers[event]:
            first_run = False
            if item_id not in self.sync_items:
                self.sync_items[item_id] = [queue.Queue()]
                first_run = True
            self.sync_items[item_id][0].put(
                ctx.OperationContext(event, n_context, item, chain, f,
                                     args, kwargs))
            if first_run:
                t = greenthread.spawn(self._consume_lb, item_id,
                                      self.sync_items[item_id][0],
                                      self._driver, self._async)
                self.sync_items[item_id].append(t)
            if not self._async:
                t = self.sync_items[item_id][1]
                t.wait()

def __init__(self, url, user, pw):
    version_string = version.version_string_with_package()
    self.nova_version = ('%(vendor)s %(product)s %(version)s' %
                         {'vendor': version.vendor_string(),
                          'product': version.product_string(),
                          'version': version_string})
    import XenAPI
    self.XenAPI = XenAPI
    self._sessions = queue.Queue()
    self.is_slave = False
    self.host_checked = False
    exception = self.XenAPI.Failure(_("Unable to log in to XenAPI "
                                      "(is the Dom0 disk full?)"))
    self.url = self._create_first_session(url, user, pw, exception)
    self._populate_session_pool(url, user, pw, exception)
    self.host_uuid = self._get_host_uuid()
    self.host_ref = self._get_host_ref()
    self.product_version, self.product_brand = \
        self._get_product_version_and_brand()
    self._verify_plugin_version()
    apply_session_helpers(self)

def _setup_response_queue(self, message):
    """Set up an eventlet queue to use to wait for replies.

    Replies come back from the target cell as a _ResponseMessage
    being sent back to the source.
    """
    resp_queue = queue.Queue()
    self.response_queues[message.uuid] = resp_queue
    return resp_queue
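
# Hedged illustration, not nova's actual API: everything here except
# _setup_response_queue, response_queues and message.uuid (which appear in
# the method above) is an assumption. It shows how a caller could green-block
# on the returned queue until the target cell delivers its _ResponseMessage.
def _wait_for_response_sketch(self, message, timeout=None):
    resp_queue = self._setup_response_queue(message)
    try:
        # eventlet's Queue.get() blocks only this greenthread, so the hub
        # keeps servicing other greenthreads while we wait for the reply.
        return resp_queue.get(timeout=timeout)
    finally:
        del self.response_queues[message.uuid]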

def __init__(self, max_workers=1000):
    assert EVENTLET_AVAILABLE, 'eventlet is needed to use a green executor'
    if max_workers <= 0:
        raise ValueError("Max workers must be greater than zero")
    self._max_workers = max_workers
    self._pool = greenpool.GreenPool(self._max_workers)
    self._delayed_work = greenqueue.Queue()
    self._shutdown_lock = greenthreading.Lock()
    self._shutdown = False

def test_task_done(self):
    channel = queue.Queue(0)
    X = object()
    gt = eventlet.spawn(channel.put, X)
    result = channel.get()
    assert result is X, (result, X)
    assert channel.unfinished_tasks == 1, channel.unfinished_tasks
    channel.task_done()
    assert channel.unfinished_tasks == 0, channel.unfinished_tasks
    gt.wait()
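
# Minimal, self-contained sketch (not taken from any project above) of the
# task_done()/join() accounting that the test above exercises; the helper
# names are illustrative only.
import eventlet
from eventlet import queue


def _worker_sketch(q):
    while True:
        item = q.get()
        try:
            print('processed', item)
        finally:
            # Every get() must be balanced by task_done() for join() to return.
            q.task_done()


def run_task_done_sketch():
    q = queue.Queue()
    eventlet.spawn_n(_worker_sketch, q)
    for item in range(3):
        q.put(item)
    # Blocks until task_done() has been called once per put().
    q.join()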

def __init__(self, url, user, pw):
    self.XenAPI = self.get_imported_xenapi()
    self._sessions = queue.Queue()
    self.is_slave = False
    exception = self.XenAPI.Failure(_("Unable to log in to XenAPI "
                                      "(is the Dom0 disk full?)"))
    url = self._create_first_session(url, user, pw, exception)
    self._populate_session_pool(url, user, pw, exception)
    self.host_uuid = self._get_host_uuid()
    self.product_version = self._get_product_version()

def __init__(self, url, user, pw):
    self.XenAPI = self.get_imported_xenapi()
    self._sessions = queue.Queue()
    exception = self.XenAPI.Failure(_("Unable to log in to XenAPI "
                                      "(is the Dom0 disk full?)"))
    for i in xrange(FLAGS.xenapi_connection_concurrent):
        session = self._create_session(url)
        with timeout.Timeout(FLAGS.xenapi_login_timeout, exception):
            session.login_with_password(user, pw)
        self._sessions.put(session)

def __init__(self, nb_api):
    self._queue = queue.Queue()
    self.publisher = _get_publisher()
    self.subscriber = self._get_subscriber()
    self.nb_api = nb_api
    self.db = self.nb_api.driver
    self.uuid = pub_sub_api.generate_publisher_uuid()
    self._rate_limit = df_utils.RateLimiter(
        cfg.CONF.df.publisher_rate_limit_count,
        cfg.CONF.df.publisher_rate_limit_timeout,
    )

def setUp(self):
    super(SchedulerServiceTest, self).setUp()
    self.timeout = timeout.Timeout(seconds=10)
    self.queue = queue.Queue()
    self.scheduler = scheduler.Scheduler(0, 1, None)
    self.scheduler.start()
    self.addCleanup(self.scheduler.stop, True)
    self.addCleanup(self.timeout.cancel)

def test_get_nowait_simple(self):
    hub = hubs.get_hub()
    result = []
    q = queue.Queue(1)
    q.put(4)
    hub.schedule_call_global(0, store_result, result, q.get_nowait)
    hub.schedule_call_global(0, store_result, result, q.get_nowait)
    eventlet.sleep(0)
    assert len(result) == 2, result
    assert result[0] == 4, result
    assert isinstance(result[1], queue.Empty), result

def __init__(self):
    self._queue = queue.Queue()
    self.publisher = self._get_publisher()
    self.multiproc_subscriber = self._get_multiproc_subscriber()
    self.db = df_utils.load_driver(
        cfg.CONF.df.nb_db_class,
        df_utils.DF_NB_DB_DRIVER_NAMESPACE)
    self.uuid = pub_sub_api.generate_publisher_uuid()
    self._rate_limit = df_utils.RateLimiter(
        cfg.CONF.df.publisher_rate_limit_count,
        cfg.CONF.df.publisher_rate_limit_timeout,
    )

def __init__(self, max_workers=1000):
    eu.check_for_eventlet(RuntimeError('Eventlet is needed to use a green'
                                       ' executor'))
    if max_workers <= 0:
        raise ValueError("Max workers must be greater than zero")
    self._max_workers = max_workers
    self._pool = greenpool.GreenPool(self._max_workers)
    self._delayed_work = greenqueue.Queue()
    self._shutdown_lock = greenthreading.Lock()
    self._shutdown = False
    self._gatherer = _Gatherer(self._submit, lock_cls=greenthreading.Lock)

def __init__(self):
    server = eventlet.listen((self.HOST, self.PORT))
    addr = server.getsockname()
    print('Listening on {}'.format(addr))
    while True:
        client_sock, addr = server.accept()
        q = queue.Queue()
        self.send_queue[client_sock.fileno()] = q
        eventlet.spawn_n(self.handle_client_recv, client_sock, addr)
        eventlet.spawn_n(self.handle_client_send, client_sock, q, addr)
        print('Connection from {}'.format(addr))

def __init__(self, url, user, pw, virtapi):
    import XenAPI
    self.XenAPI = XenAPI
    self._sessions = queue.Queue()
    self.is_slave = False
    exception = self.XenAPI.Failure(_("Unable to log in to XenAPI "
                                      "(is the Dom0 disk full?)"))
    url = self._create_first_session(url, user, pw, exception)
    self._populate_session_pool(url, user, pw, exception)
    self.host_uuid = self._get_host_uuid()
    self.product_version, self.product_brand = \
        self._get_product_version_and_brand()
    self._virtapi = virtapi

def setUp(self):
    super(LegacySchedulerTest, self).setUp()
    self.timeout = timeout.Timeout(seconds=10)
    self.queue = queue.Queue()
    self.override_config('fixed_delay', 1, 'scheduler')
    self.override_config('random_delay', 0, 'scheduler')
    self.override_config('batch_size', 100, 'scheduler')
    self.scheduler = legacy_scheduler.LegacyScheduler(CONF.scheduler)
    self.scheduler.start()
    self.addCleanup(self.scheduler.stop, True)
    self.addCleanup(self.timeout.cancel)

def __init__(self, url, user, pw):
    import XenAPI
    self.XenAPI = XenAPI
    self._sessions = queue.Queue()
    self.is_subordinate = False
    exception = self.XenAPI.Failure(_("Unable to log in to XenAPI "
                                      "(is the Dom0 disk full?)"))
    url = self._create_first_session(url, user, pw, exception)
    self._populate_session_pool(url, user, pw, exception)
    self.host_uuid = self._get_host_uuid()
    self.host_ref = self._get_host_ref()
    self.product_version, self.product_brand = \
        self._get_product_version_and_brand()
    self._verify_plugin_version()
    apply_session_helpers(self)

def test_put_nowait_unlock(self):
    hub = hubs.get_hub()
    result = []
    q = queue.Queue(0)
    eventlet.spawn(q.get)
    assert q.empty(), q
    assert q.full(), q
    eventlet.sleep(0)
    assert q.empty(), q
    assert q.full(), q
    hub.schedule_call_global(0, store_result, result, q.put_nowait, 10)
    # TODO ready method on greenthread
    # assert not p.ready(), p
    eventlet.sleep(0)
    assert result == [None], result
    # TODO ready method
    # assert p.ready(), p
    assert q.full(), q
    assert q.empty(), q

def test_get_nowait_unlock(self):
    hub = hubs.get_hub()
    result = []
    q = queue.Queue(0)
    p = eventlet.spawn(q.put, 5)
    assert q.empty(), q
    assert q.full(), q
    eventlet.sleep(0)
    assert q.empty(), q
    assert q.full(), q
    hub.schedule_call_global(0, store_result, result, q.get_nowait)
    eventlet.sleep(0)
    assert q.empty(), q
    assert q.full(), q
    assert result == [5], result
    # TODO add ready to greenthread
    # assert p.ready(), p
    assert p.dead, p
    assert q.empty(), q

def _compare_profilers_in_parent_and_in_child(self, init_profiler):
    q = queue.Queue()

    def is_profiler_initialized(where):
        # Instead of returning a single boolean add information so we can
        # identify which thread produced the result without depending on
        # queue order.
        return {where: bool(profiler.get())}

    def thread_with_no_leaked_profiler():
        if init_profiler:
            profiler.init(hmac_key='fake secret')
        self.spawn_variant(
            lambda: q.put(is_profiler_initialized('in-child')))
        q.put(is_profiler_initialized('in-parent'))

    # Make sure in parent we start with an uninitialized profiler by
    # eventlet.spawn()-ing a new thread. Otherwise the unit test runner
    # thread may leak an initialized profiler from one test to another.
    eventlet.spawn(thread_with_no_leaked_profiler)

    # In order to have some global protection against leaking initialized
    # profilers neutron.test.base.BaseTestCase.setup() also calls
    # addCleanup(profiler.clean)

    # Merge the results independently of queue order.
    results = {}
    results.update(q.get())
    results.update(q.get())
    self.assertEqual(
        {'in-parent': init_profiler,
         'in-child': init_profiler},
        results)

def __init__(self, url, user, pw):
    self.XenAPI = self.get_imported_xenapi()
    self._sessions = queue.Queue()
    self.host_uuid = None
    exception = self.XenAPI.Failure(_("Unable to log in to XenAPI "
                                      "(is the Dom0 disk full?)"))
    is_slave = False
    for i in xrange(FLAGS.xenapi_connection_concurrent):
        try:
            session = self._create_session(url)
            with timeout.Timeout(FLAGS.xenapi_login_timeout, exception):
                session.login_with_password(user, pw)
        except self.XenAPI.Failure, e:
            # if user and pw of the master are different, we're doomed!
            if e.details[0] == 'HOST_IS_SLAVE':
                master = e.details[1]
                session = self.XenAPI.Session(
                    pool.swap_xapi_host(url, master))
                session.login_with_password(user, pw)
                is_slave = True
            else:
                raise
        self._sessions.put(session)

def queue(self, *args, **kwargs):
    return queue.Queue(*args, **kwargs)
""" fd = sock.fileno() # Get send queue for this client q = send_queues.get(fd, None) # If we find a queue then this disconnect has not yet # been handled if q: q.put(None) del send_queues[fd] addr = sock.getpeername() print('Client {} disconnected'.format(addr)) sock.close() if __name__ == '__main__': server = eventlet.listen((HOST, PORT)) addr = server.getsockname() print('Listening on {}'.format(addr)) while True: client_sock,addr = server.accept() q = queue.Queue() send_queues[client_sock.fileno()] = q eventlet.spawn_n(handle_client_recv, client_sock, addr) eventlet.spawn_n(handle_client_send, client_sock, q, addr) print('Connection from {}'.format(addr))

def __init__(self):
    super(RethinkDbDriver, self).__init__()
    self._pool = queue.Queue()
    self._pool_size = 0
    self._pool_lock = threading.Lock()

def __init__(self, consumer):
    self._consumer = consumer
    self._results = queue.Queue()
    self._closed = False
    self._got_ending = False

def handle_node_request(configmanager, inputdata, operation,
                        pathcomponents, autostrip=True):
    if log.logfull:
        raise exc.TargetResourceUnavailable(
            'Filesystem full, free up space and restart confluent service')
    iscollection = False
    routespec = None
    if pathcomponents[0] == 'noderange':
        if len(pathcomponents) > 3 and pathcomponents[2] == 'nodes':
            # transform into a normal looking node request
            # this does mean we don't see if it is a valid
            # child, but that's not a goal for the noderange
            # facility anyway
            isnoderange = False
            pathcomponents = pathcomponents[2:]
        elif len(pathcomponents) == 3 and pathcomponents[2] == 'abbreviate':
            return abbreviate_noderange(configmanager, inputdata, operation)
        else:
            isnoderange = True
    else:
        isnoderange = False
    try:
        nodeorrange = pathcomponents[1]
        if not isnoderange and not configmanager.is_node(nodeorrange):
            raise exc.NotFoundException("Invalid Node")
        if isnoderange and not (len(pathcomponents) == 3 and
                                pathcomponents[2] == 'abbreviate'):
            try:
                nodes = noderange.NodeRange(nodeorrange, configmanager).nodes
            except Exception as e:
                raise exc.NotFoundException("Invalid Noderange: " + str(e))
        else:
            nodes = (nodeorrange, )
    except IndexError:
        # doesn't actually have a long enough path
        # this is enumerating a list of nodes or just empty noderange
        if isnoderange and operation == "retrieve":
            return iterate_collections([])
        elif isnoderange and operation == "create":
            inputdata = msg.InputAttributes(pathcomponents, inputdata)
            return create_noderange(inputdata.attribs, configmanager)
        elif isnoderange or operation == "delete":
            raise exc.InvalidArgumentException()
        if operation == "create":
            inputdata = msg.InputAttributes(pathcomponents, inputdata)
            return create_node(inputdata.attribs, configmanager)
        allnodes = list(configmanager.list_nodes())
        try:
            allnodes.sort(key=noderange.humanify_nodename)
        except TypeError:
            allnodes.sort()
        return iterate_collections(allnodes)
    if (isnoderange and len(pathcomponents) == 3 and
            pathcomponents[2] == 'nodes'):
        # this means that it's a list of relevant nodes
        nodes = list(nodes)
        try:
            nodes.sort(key=noderange.humanify_nodename)
        except TypeError:
            nodes.sort()
        return iterate_collections(nodes)
    if len(pathcomponents) == 2:
        iscollection = True
    else:
        try:
            routespec = nested_lookup(noderesources, pathcomponents[2:])
        except KeyError:
            raise exc.NotFoundException("Invalid element requested")
        if isinstance(routespec, dict):
            iscollection = True
        elif isinstance(routespec, PluginCollection):
            iscollection = False  # it is a collection, but plugin defined
        elif routespec is None:
            raise exc.InvalidArgumentException(
                'Custom interface required for resource')
    if iscollection:
        if operation == "delete":
            return delete_node_collection(pathcomponents, configmanager,
                                          isnoderange)
        elif operation == "retrieve":
            return enumerate_node_collection(pathcomponents, configmanager)
        else:
            raise Exception("TODO here")
    del pathcomponents[0:2]
    passvalues = queue.Queue()
    plugroute = routespec.routeinfo
    msginputdata = msg.get_input_message(pathcomponents, operation,
                                         inputdata, nodes, isnoderange,
                                         configmanager)
    if 'handler' in plugroute:  # fixed handler definition, easy enough
        if isinstance(plugroute['handler'], str):
            hfunc = getattr(pluginmap[plugroute['handler']], operation)
        else:
            hfunc = getattr(plugroute['handler'], operation)
        passvalue = hfunc(
            nodes=nodes, element=pathcomponents,
            configmanager=configmanager,
            inputdata=msginputdata)
        if isnoderange:
            return passvalue
        elif isinstance(passvalue, console.Console):
            return [passvalue]
        else:
            return stripnode(passvalue, nodes[0])
    elif 'pluginattrs' in plugroute:
        nodeattr = configmanager.get_node_attributes(
            nodes, plugroute['pluginattrs'] + ['collective.manager'])
        plugpath = None
        nodesbymanager = {}
        nodesbyhandler = {}
        badcollnodes = []
        for node in nodes:
            for attrname in plugroute['pluginattrs']:
                if attrname in nodeattr[node]:
                    plugpath = nodeattr[node][attrname]['value']
                elif 'default' in plugroute:
                    plugpath = plugroute['default']
            if plugpath in dispatch_plugins:
                cfm.check_quorum()
                manager = nodeattr[node].get('collective.manager',
                                             {}).get('value', None)
                if manager:
                    if collective.get_myname() != manager:
                        if manager not in nodesbymanager:
                            nodesbymanager[manager] = set([node])
                        else:
                            nodesbymanager[manager].add(node)
                        continue
                elif list(cfm.list_collective()):
                    badcollnodes.append(node)
                    continue
            if plugpath:
                try:
                    hfunc = getattr(pluginmap[plugpath], operation)
                except KeyError:
                    nodesbyhandler[BadPlugin(node, plugpath).error] = [node]
                    continue
                if hfunc in nodesbyhandler:
                    nodesbyhandler[hfunc].append(node)
                else:
                    nodesbyhandler[hfunc] = [node]
        for bn in badcollnodes:
            nodesbyhandler[BadCollective(bn).error] = [bn]
        workers = greenpool.GreenPool()
        numworkers = 0
        for hfunc in nodesbyhandler:
            numworkers += 1
            workers.spawn(addtoqueue, passvalues, hfunc,
                          {'nodes': nodesbyhandler[hfunc],
                           'element': pathcomponents,
                           'configmanager': configmanager,
                           'inputdata': msginputdata})
        for manager in nodesbymanager:
            numworkers += 1
            workers.spawn(addtoqueue, passvalues, dispatch_request,
                          {'nodes': nodesbymanager[manager],
                           'manager': manager,
                           'element': pathcomponents,
                           'configmanager': configmanager,
                           'inputdata': inputdata,
                           'operation': operation,
                           'isnoderange': isnoderange})
        if isnoderange or not autostrip:
            return iterate_queue(numworkers, passvalues)
        else:
            if numworkers > 0:
                return iterate_queue(numworkers, passvalues, nodes[0])
            else:
                raise exc.NotImplementedException()
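
# Hedged sketch only: confluent defines its own addtoqueue() and
# iterate_queue(), and this is just one plausible shape for the fan-out/
# fan-in pattern used above. Each spawned worker pushes its results plus a
# terminator onto the shared queue, and the consumer drains the queue until
# every worker has signalled completion.
_TERMINATOR = object()


def addtoqueue_sketch(resultqueue, fn, kwargs):
    try:
        for result in fn(**kwargs):
            resultqueue.put(result)
    finally:
        # Exactly one terminator per spawned worker.
        resultqueue.put(_TERMINATOR)


def iterate_queue_sketch(numworkers, resultqueue):
    remaining = numworkers
    while remaining:
        value = resultqueue.get()
        if value is _TERMINATOR:
            remaining -= 1
        else:
            yield value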