def _full_updatemacmap(configmanager):
    global vintage
    global _macmap
    global _nodesbymac
    global _switchportmap
    global _macsbyswitch
    global switchbackoff
    start = util.monotonic_time()
    with mapupdating:
        vintage = util.monotonic_time()
        # Clear all existing entries
        _macmap = {}
        _nodesbymac = {}
        _switchportmap = {}
        _macsbyswitch = {}
        if configmanager.tenant is not None:
            raise exc.ForbiddenRequest(
                'Network topology not available to tenants')
        # here's a list of switches... need to add nodes that are switches
        nodelocations = configmanager.get_node_attributes(
            configmanager.list_nodes(), ('net*.switch', 'net*.switchport'))
        switches = set([])
        for node in nodelocations:
            cfg = nodelocations[node]
            for attr in cfg:
                if not attr.endswith('.switch') or 'value' not in cfg[attr]:
                    continue
                curswitch = cfg[attr].get('value', None)
                if not curswitch:
                    continue
                switches.add(curswitch)
                switchportattr = attr + 'port'
                if switchportattr in cfg:
                    portname = cfg[switchportattr].get('value', '')
                    if not portname:
                        continue
                    if curswitch not in _switchportmap:
                        _switchportmap[curswitch] = {}
                    if portname in _switchportmap[curswitch]:
                        log.log({
                            'error': 'Duplicate switch topology config '
                                     'for {0} and {1}'.format(
                                         node,
                                         _switchportmap[curswitch][portname])
                        })
                        _switchportmap[curswitch][portname] = None
                    else:
                        _switchportmap[curswitch][portname] = node
        switchauth = get_switchcreds(configmanager, switches)
        pool = GreenPool(64)
        for ans in pool.imap(_map_switch, switchauth):
            vintage = util.monotonic_time()
            yield ans
    endtime = util.monotonic_time()
    duration = endtime - start
    duration = duration * 15  # wait 15 times as long as it takes to walk
    # avoid spending a large portion of the time hitting switches with snmp
    # requests
    if duration > switchbackoff:
        switchbackoff = duration
def f(args):
    def x(*y):
        print(y)

    grp = GreenPool()
    list(grp.starmap(x, [[1, 2, 3] for x in range(1000)]))
    grp.waitall()
def __init__(self, volume_driver=None, service_name=None, *args, **kwargs):
    """Load the driver from the one specified in args, or from flags."""
    # update_service_capabilities needs service_name to be volume
    super(VolumeManager, self).__init__(service_name='volume',
                                        *args, **kwargs)
    self.configuration = Configuration(volume_manager_opts,
                                       config_group=service_name)
    self._tp = GreenPool()

    if not volume_driver:
        # Get from configuration, which will get the default
        # if its not using the multi backend
        volume_driver = self.configuration.volume_driver
    if volume_driver in MAPPING:
        LOG.warn(
            _("Driver path %s is deprecated, update your "
              "configuration to the new path."), volume_driver)
        volume_driver = MAPPING[volume_driver]
    if volume_driver == 'cinder.volume.drivers.lvm.ThinLVMVolumeDriver':
        # Deprecated in Havana
        # Not handled in MAPPING because it requires setting a conf option
        LOG.warn(
            _("ThinLVMVolumeDriver is deprecated, please configure "
              "LVMISCSIDriver and lvm_type=thin. Continuing with "
              "those settings."))
        volume_driver = 'cinder.volume.drivers.lvm.LVMISCSIDriver'
        self.configuration.lvm_type = 'thin'
    self.driver = importutils.import_object(
        volume_driver,
        configuration=self.configuration,
        db=self.db)
def __init__(self, controllercls, connection, exchange, topic,
             pool=None, poolsize=1000):
    self.nodeid = UIDGEN()

    if pool is None:
        self.procpool = GreenPool(size=poolsize)
    else:
        self.procpool = pool

    self.connection = connection
    self.controller = controllercls()
    self.topic = topic
    self.greenlet = None
    self.messagesem = Semaphore()
    self.consume_ready = Event()

    node_topic = "{}.{}".format(self.topic, self.nodeid)
    self.queues = [
        entities.get_topic_queue(exchange, topic),
        entities.get_topic_queue(exchange, node_topic),
        entities.get_fanout_queue(topic),
    ]
    self._channel = None
    self._consumers = None
def tests(status, test):
    pool = GreenPool(size=500)
    for host, s in status['servers'].iteritems():
        for t in test:
            if t.name in s:
                pool.spawn_n(t.test, host, s)
    pool.waitall()
def __init__(self, namespace, concurrency=50, error_file=None):
    self.pool = GreenPool(concurrency)
    self.error_file = error_file
    if self.error_file:
        f = open(self.error_file, 'a')
        self.error_writer = csv.writer(f, delimiter=' ')

    conf = {'namespace': namespace}
    self.account_client = AccountClient(conf)
    self.container_client = ContainerClient(conf)
    self.blob_client = BlobClient()

    self.accounts_checked = 0
    self.containers_checked = 0
    self.objects_checked = 0
    self.chunks_checked = 0
    self.account_not_found = 0
    self.container_not_found = 0
    self.object_not_found = 0
    self.chunk_not_found = 0
    self.account_exceptions = 0
    self.container_exceptions = 0
    self.object_exceptions = 0
    self.chunk_exceptions = 0

    self.list_cache = {}
    self.running = {}
def run_once(self, *args, **kwargs):
    """
    Executes a single pass, looking for objects to expire.

    :param args: Extra args to fulfill the Daemon interface; this daemon
                 has no additional args.
    :param kwargs: Extra keyword args to fulfill the Daemon interface; this
                   daemon accepts processes and process keyword args.
                   These will override the values from the config file if
                   provided.
    """
    processes, process = self.get_process_values(kwargs)
    pool = GreenPool(self.concurrency)
    containers_to_delete = []
    self.report_first_time = self.report_last_time = time()
    self.report_objects = 0
    try:
        self.logger.debug(_('Run begin'))
        containers, objects = \
            self.swift.get_account_info(self.expiring_objects_account)
        self.logger.info(_('Pass beginning; %s possible containers; %s '
                           'possible objects') % (containers, objects))
        for c in self.swift.iter_containers(self.expiring_objects_account):
            container = c['name']
            timestamp = int(container)
            if timestamp > int(time()):
                break
            containers_to_delete.append(container)
            for o in self.swift.iter_objects(self.expiring_objects_account,
                                             container):
                obj = o['name'].encode('utf8')
                if processes > 0:
                    obj_process = int(
                        hashlib.md5('%s/%s' % (container, obj)).
                        hexdigest(), 16)
                    if obj_process % processes != process:
                        continue
                timestamp, actual_obj = obj.split('-', 1)
                timestamp = int(timestamp)
                if timestamp > int(time()):
                    break
                pool.spawn_n(
                    self.delete_object, actual_obj, timestamp,
                    container, obj)
        pool.waitall()
        for container in containers_to_delete:
            try:
                self.swift.delete_container(
                    self.expiring_objects_account,
                    container,
                    acceptable_statuses=(2, HTTP_NOT_FOUND, HTTP_CONFLICT))
            except (Exception, Timeout) as err:
                self.logger.exception(
                    _('Exception while deleting container %s %s') %
                    (container, str(err)))
        self.logger.debug(_('Run end'))
        self.report(final=True)
    except (Exception, Timeout):
        self.logger.exception(_('Unhandled exception'))
def run_once(self, *args, **kwargs):
    processes, process = self.get_process_values(kwargs)
    pool = GreenPool(self.concurrency)
    self.report_first_time = self.report_last_time = time()
    self.report_objects = 0
    self.report_containers = 0
    containers_to_delete = []
    try:
        self.logger.debug(_('Run begin'))
        containers, objects = \
            self.swift.get_account_info(self.sample_account)
        self.logger.info(_('Pass beginning; %s possible containers; %s '
                           'possible objects') % (containers, objects))
        for c in self.swift.iter_containers(self.sample_account):
            container = c['name']
            try:
                timestamp, account = container.split('_', 1)
                timestamp = float(timestamp)
            except ValueError:
                self.logger.debug('ValueError: %s, '
                                  'need more than 1 value to unpack' %
                                  container)
            else:
                if processes > 0:
                    obj_proc = int(hashlib.md5(container).hexdigest(), 16)
                    if obj_proc % processes != process:
                        continue
                n = (float(time()) // self.sample_rate) * self.sample_rate
                if timestamp <= n:
                    containers_to_delete.append(container)
                    pool.spawn_n(self.aggregate_container, container)
        pool.waitall()

        for container in containers_to_delete:
            try:
                self.logger.debug('delete container: %s' % container)
                self.swift.delete_container(self.sample_account, container,
                                            acceptable_statuses=(
                                                2, HTTP_NOT_FOUND,
                                                HTTP_CONFLICT))
            except (Exception, Timeout) as err:
                self.logger.exception(
                    _('Exception while deleting container %s %s') %
                    (container, str(err)))

        tenants_to_fillup = list()
        for c in self.swift.iter_containers(self.aggregate_account):
            tenant_id = c['name']
            if processes > 0:
                c_proc = int(hashlib.md5(tenant_id).hexdigest(), 16)
                if c_proc % processes != process:
                    continue
            tenants_to_fillup.append(tenant_id)
        # fillup lossed usage data
        self.fillup_lossed_usage_data(tenants_to_fillup)

        self.logger.debug(_('Run end'))
        self.report(final=True)
    except (Exception, Timeout):
        self.logger.exception(_('Unhandled exception'))
def __init__(self, local, remote=None, retries=3, log=None, pool_size=None,
             wait_for_remote=False):
    self.local = local
    self.remote = remote or []
    self.retries = retries
    self.log = log or logging
    self.pool = GreenPool(pool_size) if pool_size else GreenPool()
    self.wait_for_remote = wait_for_remote
def update_macmap(configmanager):
    """Interrogate switches to build/update mac table

    Begin a rebuild process.  This process is a generator that will yield
    as each switch interrogation completes, allowing a caller to recheck
    the cache as results become possible, rather than having to wait for
    the process to complete to interrogate.
    """
    global _macmap
    global _nodesbymac
    global _switchportmap
    # Clear all existing entries
    _macmap = {}
    _nodesbymac = {}
    _switchportmap = {}
    if configmanager.tenant is not None:
        raise exc.ForbiddenRequest('Network topology not available to tenants')
    nodelocations = configmanager.get_node_attributes(
        configmanager.list_nodes(), ('hardwaremanagement.switch',
                                     'hardwaremanagement.switchport'))
    switches = set([])
    for node in nodelocations:
        cfg = nodelocations[node]
        if 'hardwaremanagement.switch' in cfg:
            curswitch = cfg['hardwaremanagement.switch']['value']
            switches.add(curswitch)
            if 'hardwaremanagement.switchport' in cfg:
                portname = cfg['hardwaremanagement.switchport']['value']
                if curswitch not in _switchportmap:
                    _switchportmap[curswitch] = {}
                if portname in _switchportmap[curswitch]:
                    log.log({
                        'warning': 'Duplicate switch topology config for '
                                   '{0} and {1}'.format(
                                       node,
                                       _switchportmap[curswitch][portname])
                    })
                _switchportmap[curswitch][portname] = node
    switchcfg = configmanager.get_node_attributes(
        switches, ('secret.hardwaremanagementuser',
                   'secret.hardwaremanagementpassword'),
        decrypt=True)
    switchauth = []
    for switch in switches:
        password = '******'
        user = None
        if (switch in switchcfg and
                'secret.hardwaremanagementpassword' in switchcfg[switch]):
            password = switchcfg[switch]['secret.hardwaremanagementpassword'][
                'value']
            if 'secret.hardwaremanagementuser' in switchcfg[switch]:
                user = switchcfg[switch]['secret.hardwaremanagementuser'][
                    'value']
        switchauth.append((switch, password, user))
    pool = GreenPool()
    for res in pool.imap(_map_switch, switchauth):
        yield res
    print(repr(_macmap))
def __init__(self, games_url):
    """
    :param thread_pool:
    """
    self._data = _WorkerManagerData()
    self.games_url = games_url
    self._pool = GreenPool(size=3)
    super(WorkerManager, self).__init__()
def __init__(self, game_state, users_url):
    """
    :param thread_pool:
    """
    self._data = _WorkerManagerData(game_state, {})
    self.users_url = users_url
    self._pool = GreenPool(size=3)
    super(WorkerManager, self).__init__()
def test_high_client_load():
    eventlet.spawn_n(fake_service, "tcp://127.0.0.1:6805")
    clients = GreenPool()
    for i in xrange(0, 100):
        clients.spawn(fake_client, "tcp://127.0.0.1:6804",
                      "%s:%s" % (os.getpid(), i))
    clients.waitall()
def update_macmap(configmanager):
    """Interrogate switches to build/update mac table

    Begin a rebuild process.  This process is a generator that will yield
    as each switch interrogation completes, allowing a caller to recheck
    the cache as results become possible, rather than having to wait for
    the process to complete to interrogate.
    """
    global _macmap
    global _nodesbymac
    global _switchportmap
    # Clear all existing entries
    _macmap = {}
    _nodesbymac = {}
    _switchportmap = {}
    if configmanager.tenant is not None:
        raise exc.ForbiddenRequest('Network topology not available to tenants')
    nodelocations = configmanager.get_node_attributes(
        configmanager.list_nodes(), ('hardwaremanagement.switch',
                                     'hardwaremanagement.switchport'))
    switches = set([])
    for node in nodelocations:
        cfg = nodelocations[node]
        if 'hardwaremanagement.switch' in cfg:
            curswitch = cfg['hardwaremanagement.switch']['value']
            switches.add(curswitch)
            if 'hardwaremanagement.switchport' in cfg:
                portname = cfg['hardwaremanagement.switchport']['value']
                if curswitch not in _switchportmap:
                    _switchportmap[curswitch] = {}
                if portname in _switchportmap[curswitch]:
                    log.log({'warning': 'Duplicate switch topology config for '
                                        '{0} and {1}'.format(node,
                                                             _switchportmap[
                                                                 curswitch][
                                                                 portname])})
                _switchportmap[curswitch][portname] = node
    switchcfg = configmanager.get_node_attributes(
        switches, ('secret.hardwaremanagementuser',
                   'secret.hardwaremanagementpassword'),
        decrypt=True)
    switchauth = []
    for switch in switches:
        password = '******'
        user = None
        if (switch in switchcfg and
                'secret.hardwaremanagementpassword' in switchcfg[switch]):
            password = switchcfg[switch]['secret.hardwaremanagementpassword'][
                'value']
            if 'secret.hardwaremanagementuser' in switchcfg[switch]:
                user = switchcfg[switch]['secret.hardwaremanagementuser'][
                    'value']
        switchauth.append((switch, password, user))
    pool = GreenPool()
    for res in pool.imap(_map_switch, switchauth):
        yield res
    print(repr(_macmap))
def _get_patches(self, project):
    commits = self.repository_rpc.get_commits(project)
    chunks = utilities.chunk(commits, size=round(len(commits) * 0.01))

    pool = GreenPool(os.cpu_count())
    arguments = [(project, c, self.repository_rpc) for c in chunks]
    patches = list()
    for _patches in pool.starmap(_get_patches, arguments):
        patches.extend(_patches)
    return PatchSchema(many=True).load(patches)
def run_once(self, *args, **kwargs):
    """
    Executes a single pass, looking for objects to expire.

    :param args: Extra args to fulfill the Daemon interface; this daemon
                 has no additional args.
    :param kwargs: Extra keyword args to fulfill the Daemon interface; this
                   daemon accepts processes and process keyword args.
                   These will override the values from the config file if
                   provided.
    """
    self.get_process_values(kwargs)
    pool = GreenPool(self.concurrency)
    containers_to_delete = set([])
    self.report_first_time = self.report_last_time = time()
    self.report_objects = 0
    try:
        self.logger.debug('Run begin')
        containers, objects = \
            self.swift.get_account_info(self.expiring_objects_account)
        self.logger.info(_('Pass beginning; '
                           '%(containers)s possible containers; '
                           '%(objects)s possible objects') % {
                         'containers': containers, 'objects': objects})

        for container, obj in self.iter_cont_objs_to_expire():
            containers_to_delete.add(container)

            if not obj:
                continue

            timestamp, actual_obj = obj.split('-', 1)
            timestamp = int(timestamp)
            if timestamp > int(time()):
                break
            pool.spawn_n(
                self.delete_object, actual_obj, timestamp,
                container, obj)

        pool.waitall()
        for container in containers_to_delete:
            try:
                self.swift.delete_container(
                    self.expiring_objects_account,
                    container,
                    acceptable_statuses=(2, HTTP_NOT_FOUND, HTTP_CONFLICT))
            except (Exception, Timeout) as err:
                self.logger.exception(
                    _('Exception while deleting container %(container)s '
                      '%(err)s') % {'container': container,
                                    'err': str(err)})
        self.logger.debug('Run end')
        self.report(final=True)
    except (Exception, Timeout):
        self.logger.exception(_('Unhandled exception'))
def _parallel_execute(self, operation, *args):
    def _spawn(context, operation, fabric_name, conn, *args):
        # Inherit this thread's context from the parent
        context.update_store()

        @lockutils.synchronized(fabric_name, 'fcfabric-', True)
        def _locked_spawn(operation, fabric_name, conn, *args):
            return operation(fabric_name, conn, *args)

        return _locked_spawn(operation, fabric_name, conn, *args)

    """
    Perform an operation against all fabrics, consolidate the responses
    into a dictionary keyed on fabric name.
    """
    pool = GreenPool(size=len(self.fabrics))

    # Obtain our current context so that we can make sure that our child
    # threads have the same context, so that we can correlate log messages
    # that they generate.
    context = getattr(local.store, 'context', None)

    threads = {}
    for fabric_name, conn in self.fabrics.iteritems():
        thread = pool.spawn(_spawn, context, operation, fabric_name,
                            conn, *args)
        threads[fabric_name] = thread

    # Collect the responses.  This may raise exceptions when we call wait()
    # If they do, we collect them and raise a collective exception at the
    # end.
    responses = {}
    exceptions = []
    for fabric_name, thread in threads.iteritems():
        try:
            responses[fabric_name] = thread.wait()
        except Exception as e:
            """
            FabricExceptions can indicate that a backtrace is not required
            if they contain sufficient debug information themselves.
            """
            if (not isinstance(e, exception.FabricException) or
                    e.backtrace_needed):
                LOG.exception(e)
            exceptions.append(e)

    # If any exceptions were raised, we throw an exception that
    # encapsulates them all.
    if exceptions:
        raise exception.ZoneManagerParallel(exceptions)

    return responses
def __init__(self, size=0, ignore_errors_num=0):
    """Initialises.

    :param size: the max number of parallel tasks
    :param ignore_errors_num: number of errors which does not stop the
                              execution
    """
    self.executor = GreenPool(max(size, self.MIN_POOL_SIZE))
    self.ignore_errors_num = ignore_errors_num
    self.errors = []
    self.tasks = set()
def test_high_workload():
    # fire up three services to receive in roundrobin style, giving
    # each an ident so we can make sure they're working that way
    eventlet.spawn_n(fake_service, "tcp://127.0.0.1:6900", 1)
    eventlet.spawn_n(fake_service, "tcp://127.0.0.1:6900", 2)
    eventlet.spawn_n(fake_service, "tcp://127.0.0.1:6900", 3)
    clients = GreenPool()
    # fire up a bunch of clients to thrash it at random
    for i in xrange(0, 100):
        clients.spawn(fake_client, "tcp://127.0.0.1:6802",
                      "%s:%s" % (os.getpid(), i))
    clients.waitall()
def __init__(self, service_cls, config, worker_ctx_cls=None):
    self.service_cls = service_cls
    self.config = config

    if worker_ctx_cls is not None:
        warnings.warn(
            "The constructor of `ServiceContainer` has changed. "
            "The `worker_ctx_cls` kwarg is now deprecated. See CHANGES, "
            "version 2.4.0 for more details. This warning will be removed "
            "in version 2.6.0", DeprecationWarning)
    else:
        worker_ctx_cls = WorkerContext
    self.worker_ctx_cls = worker_ctx_cls

    self.service_name = get_service_name(service_cls)
    self.shared_extensions = {}

    self.max_workers = (config.get(MAX_WORKERS_CONFIG_KEY) or
                        DEFAULT_MAX_WORKERS)

    self.serializer = config.get(SERIALIZER_CONFIG_KEY, DEFAULT_SERIALIZER)
    self.accept = [self.serializer]

    self.entrypoints = SpawningSet()
    self.dependencies = SpawningSet()
    self.subextensions = SpawningSet()

    for attr_name, dependency in inspect.getmembers(
            service_cls, is_dependency):
        bound = dependency.bind(self.interface, attr_name)
        self.dependencies.add(bound)
        self.subextensions.update(iter_extensions(bound))

    for method_name, method in inspect.getmembers(service_cls, is_method):
        entrypoints = getattr(method, ENTRYPOINT_EXTENSIONS_ATTR, [])
        for entrypoint in entrypoints:
            bound = entrypoint.bind(self.interface, method_name)
            self.entrypoints.add(bound)
            self.subextensions.update(iter_extensions(bound))

    self.started = False
    self._worker_pool = GreenPool(size=self.max_workers)

    self._worker_threads = {}
    self._managed_threads = {}
    self._being_killed = False
    self._died = Event()
def _update_neighbors_backend(configmanager, force):
    global _neighdata
    global _neighbypeerid
    vintage = _neighdata.get('!!vintage', 0)
    now = util.monotonic_time()
    if vintage > (now - 60) and not force:
        return
    _neighdata = {'!!vintage': now}
    _neighbypeerid = {'!!vintage': now}
    switches = netutil.list_switches(configmanager)
    switchcreds = netutil.get_switchcreds(configmanager, switches)
    switchcreds = [x + (force,) for x in switchcreds]
    pool = GreenPool(64)
    for ans in pool.imap(_extract_neighbor_data, switchcreds):
        yield ans
def __init__(self, service_name=None, *args, **kwargs):
    """Load the specified in args, or flags."""
    # update_service_capabilities needs service_name to be volume
    super(CinderBackupProxy, self).__init__(service_name='backup',
                                            *args, **kwargs)
    self.configuration = Configuration(volume_backup_opts,
                                       config_group=service_name)
    self._tp = GreenPool()
    self.volume_api = volume.API()
    self._last_info_volume_state_heal = 0
    self._change_since_time = None
    self.volumes_mapping_cache = {'backups': {}}
    self.init_flag = False
    self.backup_cache = []
    self.tenant_id = self._get_tenant_id()
    self.adminCinderClient = self._get_cascaded_cinder_client()
def discovery(status, test):
    pool = GreenPool(size=500)
    for d in settings.discovery:
        servers = d().get_servers()  # [('ip', 'host')]
        for server in servers:
            ip = server[0]
            host = server[1]
            if host in settings.exclude:
                continue
            if host not in status["servers"]:  # do discovery
                status["servers"][host] = {}
                logging.info("performing discovery on %r", server)
                for t in test:
                    pool.spawn_n(t.discover, ip, status["servers"][host])
            status["servers"][host]["ip"] = ip
    pool.waitall()
class Control(object):
    thread_pool = GreenPool()

    @staticmethod
    def control_center(argv, test_flag):
        max_try = Config.max_try
        for time in range(max_try):
            if test_flag:
                if Config.debug:
                    Control.debug_control(argv)
                else:
                    Control.release_control(argv)
                return

    @staticmethod
    def debug_control(argv):
        for item in argv['iterable']:
            argv['function'](item)
        return

    @staticmethod
    def release_control(argv):
        try:
            for _ in Control.thread_pool.imap(argv['function'],
                                              argv['iterable']):
                pass
        except Exception:
            # Swallow all errors here;
            # only enable debug mode to investigate once users report an issue
            pass
        return
def __init__(self):
    super(AclResource, self).__init__()
    self.acl_manager = AclManager(environ.env)
    self.last_cleared = datetime.utcnow()
    self.executor = GreenPool()
    self.request = request
    self.env = environ.env
def __init__(self, volume_driver=None, service_name=None, *args, **kwargs):
    """Load the driver from the one specified in args, or from flags."""
    # update_service_capabilities needs service_name to be volume
    super(VolumeManager, self).__init__(service_name='volume',
                                        *args, **kwargs)
    self.configuration = Configuration(volume_manager_opts,
                                       config_group=service_name)
    self._tp = GreenPool()

    if not volume_driver:
        # Get from configuration, which will get the default
        # if its not using the multi backend
        volume_driver = self.configuration.volume_driver
    if volume_driver in MAPPING:
        LOG.warn(_("Driver path %s is deprecated, update your "
                   "configuration to the new path."), volume_driver)
        volume_driver = MAPPING[volume_driver]
    if volume_driver == 'cinder.volume.drivers.lvm.ThinLVMVolumeDriver':
        # Deprecated in Havana
        # Not handled in MAPPING because it requires setting a conf option
        LOG.warn(_("ThinLVMVolumeDriver is deprecated, please configure "
                   "LVMISCSIDriver and lvm_type=thin. Continuing with "
                   "those settings."))
        volume_driver = 'cinder.volume.drivers.lvm.LVMISCSIDriver'
        self.configuration.lvm_type = 'thin'
    self.driver = importutils.import_object(
        volume_driver,
        configuration=self.configuration,
        db=self.db)
def imap(self, func, *args):
    reqid = request_id()

    def impl(*args):
        set_request_id(reqid)
        return func(*args)

    return GreenPool.imap(self, impl, *args)
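# --- Added sketch (not from the project the override above comes from): the
# same request-id-propagating pattern made self-contained so it can run on its
# own. The request_id/set_request_id helpers and the RequestAwareGreenPool
# name are assumptions for illustration. eventlet.corolocal.local is
# greenlet-local, so worker greenthreads do not inherit the caller's id unless
# the wrapper copies it over, which is exactly what the imap override does.
from eventlet import GreenPool, corolocal

_store = corolocal.local()


def request_id():
    return getattr(_store, 'request_id', None)


def set_request_id(reqid):
    _store.request_id = reqid


class RequestAwareGreenPool(GreenPool):
    def imap(self, func, *args):
        reqid = request_id()

        def impl(*a):
            set_request_id(reqid)  # copy the caller's id into the worker
            return func(*a)

        return GreenPool.imap(self, impl, *args)


set_request_id('req-42')
pool = RequestAwareGreenPool(4)
# each worker sees 'req-42' even though it runs in its own greenthread
print(list(pool.imap(lambda n: (request_id(), n), range(3))))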
def imap(requests, prefetch=True, size=2):
    """Concurrently converts a generator object of Requests to
    a generator of Responses.

    :param requests: a generator of Request objects.
    :param prefetch: If False, the content will not be downloaded immediately.
    :param size: Specifies the number of requests to make at a time.
        default is 2
    """

    def send(r):
        r.send(prefetch)
        return r.response

    pool = GreenPool(size)

    for r in pool.imap(send, requests):
        yield r

    pool.waitall()
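# --- Added sketch (not part of the library code above): a minimal,
# self-contained illustration of the same lazy pattern using eventlet
# directly. GreenPool.imap yields results lazily and in order, so the consumer
# can start processing early results before the whole batch has finished; the
# fetch() function and URL list are hypothetical stand-ins for real requests.
import eventlet
from eventlet import GreenPool


def fetch(url):
    eventlet.sleep(0.1)  # stands in for a blocking network call
    return url, 200


urls = ['http://example.com/%d' % i for i in range(10)]
pool = GreenPool(2)  # at most two requests in flight, like imap(size=2)
for url, status in pool.imap(fetch, urls):
    print(url, status)
pool.waitall()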
def __init__(self, namespace, concurrency=50,
             error_file=None, rebuild_file=None, full=True,
             limit_listings=0, request_attempts=1):
    self.pool = GreenPool(concurrency)
    self.error_file = error_file
    self.full = bool(full)
    # Optimisation for when we are only checking one object
    # or one container.
    # 0 -> do not limit
    # 1 -> limit account listings (list of containers)
    # 2 -> limit container listings (list of objects)
    self.limit_listings = limit_listings
    if self.error_file:
        f = open(self.error_file, 'a')
        self.error_writer = csv.writer(f, delimiter=' ')

    self.rebuild_file = rebuild_file
    if self.rebuild_file:
        fd = open(self.rebuild_file, 'a')
        self.rebuild_writer = csv.writer(fd, delimiter='|')

    conf = {'namespace': namespace}
    self.account_client = AccountClient(
        conf, max_retries=request_attempts - 1)
    self.container_client = ContainerClient(
        conf, max_retries=request_attempts - 1,
        request_attempts=request_attempts)
    self.blob_client = BlobClient()

    self.accounts_checked = 0
    self.containers_checked = 0
    self.objects_checked = 0
    self.chunks_checked = 0
    self.account_not_found = 0
    self.container_not_found = 0
    self.object_not_found = 0
    self.chunk_not_found = 0
    self.account_exceptions = 0
    self.container_exceptions = 0
    self.object_exceptions = 0
    self.chunk_exceptions = 0

    self.list_cache = {}
    self.running = {}
def imap(requests, stream=False, size=2):
    """Concurrently converts a generator object of Requests to
    a generator of Responses.

    :param requests: a generator of Request objects.
    :param stream: If True, the content will not be downloaded immediately.
    :param size: Specifies the number of requests to make at a time.
        default is 2
    """

    pool = GreenPool(size)

    def send(r):
        return r.send(stream=stream)

    for r in pool.imap_unordered(send, requests):
        yield r

    pool.waitall()
def map(requests, prefetch=True, size=None):
    """Concurrently converts a list of Requests to Responses.

    :param requests: a collection of Request objects.
    :param prefetch: If False, the content will not be downloaded immediately.
    :param size: Specifies the number of requests to make at a time.
        If None, no throttling occurs.
    """

    requests = list(requests)

    pool = GreenPool(size) if size else None
    jobs = [send(r, pool, prefetch=prefetch) for r in requests]

    if pool is not None:
        pool.waitall()
    else:
        [j.wait() for j in jobs]

    return [r.response for r in requests]
def __init__(self, integration_bridge=None):
    if isinstance(integration_bridge, VCenter):
        self.v_center = integration_bridge
    else:
        self.v_center = VCenter(CONF.ml2_vmware)
        self.v_center.start()

    self._ports_by_device_id = {}  # Device-id seems to be the same as port id
    self._sg_aggregates_per_dvs_uuid = defaultdict(
        lambda: defaultdict(sg_util.SgAggr))
    self._green = self.v_center.pool or GreenPool()
def __init__(self, service_cls, config, worker_ctx_cls=None):
    self.service_cls = service_cls
    self.config = config

    if worker_ctx_cls is None:
        worker_ctx_cls = WorkerContext
    self.worker_ctx_cls = worker_ctx_cls

    self.service_name = get_service_name(service_cls)
    self.shared_extensions = {}

    self.max_workers = (config.get(MAX_WORKERS_CONFIG_KEY) or
                        DEFAULT_MAX_WORKERS)

    self.serializer = config.get(SERIALIZER_CONFIG_KEY, DEFAULT_SERIALIZER)
    self.accept = [self.serializer]

    self.entrypoints = SpawningSet()
    self.dependencies = SpawningSet()
    self.subextensions = SpawningSet()

    for attr_name, dependency in inspect.getmembers(
            service_cls, is_dependency):
        bound = dependency.bind(self.interface, attr_name)
        self.dependencies.add(bound)
        self.subextensions.update(iter_extensions(bound))

    for method_name, method in inspect.getmembers(service_cls, is_method):
        entrypoints = getattr(method, ENTRYPOINT_EXTENSIONS_ATTR, [])
        for entrypoint in entrypoints:
            bound = entrypoint.bind(self.interface, method_name)
            self.entrypoints.add(bound)
            self.subextensions.update(iter_extensions(bound))

    self.started = False
    self._worker_pool = GreenPool(size=self.max_workers)

    self._active_threads = {}
    self._protected_threads = set()
    self._being_killed = False
    self._died = Event()
class AsyncMixin(object):
    def __init__(self):
        self.pool = GreenPool()

    def publish_async(self, event, callback, *args, **kwargs):
        return self.pool.spawn(self.__publish_async, event, callback,
                               *args, **kwargs)

    def __publish_async(self, event, callback, *args, **kwargs):
        if event not in self.listeners:
            return
        for listener in self.listeners[event]:
            self.pool.spawn(self.__worker(listener, callback),
                            *args, **kwargs)

    def __worker(self, listener, callback):
        def worker(*args, **kwargs):
            callback(listener(*args, **kwargs))
        return worker
def run_once(self, *args, **kwargs):
    """
    Executes a single pass, looking for objects to expire.

    :param args: Extra args to fulfill the Daemon interface; this daemon
                 has no additional args.
    :param kwargs: Extra keyword args to fulfill the Daemon interface; this
                   daemon accepts processes and process keyword args.
                   These will override the values from the config file if
                   provided.
    """
    processes, process = self.get_process_values(kwargs)
    pool = GreenPool(self.concurrency)
    self.report_first_time = self.report_last_time = time()
    self.report_objects = 0
    try:
        self.logger.debug(_('Run begin'))
        for o in self.swift.iter_objects(self.restoring_object_account,
                                         self.todo_container):
            obj = o['name'].encode('utf8')
            if processes > 0:
                obj_process = int(
                    hashlib.md5('%s/%s' % (self.todo_container, obj)).
                    hexdigest(), 16)
                if obj_process % processes != process:
                    continue
            pool.spawn_n(self.start_object_restoring, obj)

        pool.waitall()

        for o in self.swift.iter_objects(self.restoring_object_account,
                                         self.restoring_container):
            obj = o['name'].encode('utf8')
            if processes > 0:
                obj_process = int(
                    hashlib.md5('%s/%s' % (self.restoring_container, obj)).
                    hexdigest(), 16)
                if obj_process % processes != process:
                    continue
            pool.spawn_n(self.check_object_restored, obj)

        pool.waitall()

        self.logger.debug(_('Run end'))
        self.report(final=True)
    except (Exception, Timeout) as e:
        report_exception(self.logger, _('Unhandled exception'), self.client)
def build_main(commits, config):
    logger = logging.getLogger(__name__ + '.build_main')
    logger.info('triggered with %d commits', len(commits))
    logger.debug('commits = %r', commits)
    token = get_token(config)
    pool = GreenPool()
    commit_map = dict(commits)
    commit_ids = [commit_id for commit_id, _ in commits]
    results = pool.imap(
        functools.partial(download_archive, token=token, config=config),
        commit_ids
    )
    env = make_virtualenv(config)
    save_dir = config['SAVE_DIRECTORY']
    complete_hook = config.get('COMPLETE_HOOK')
    for commit, filename in results:
        working_dir = extract(filename, save_dir)
        try:
            build = build_sphinx(working_dir, env)
        except Exception:
            permalink = commit_map[commit]
            if not config.get('RECREATE_VIRTUALENV'):
                env = make_virtualenv(config, recreate=True)
                try:
                    build = build_sphinx(working_dir, env)
                except Exception:
                    if callable(complete_hook):
                        complete_hook(commit, permalink, sys.exc_info())
                    continue
            if callable(complete_hook):
                complete_hook(commit, permalink, sys.exc_info())
            continue
        result_dir = os.path.join(save_dir, commit)
        shutil.move(build, result_dir)
        logger.info('build complete: %s' % result_dir)
        if callable(complete_hook):
            complete_hook(commit, commit_map[commit], None)
        shutil.rmtree(working_dir)
        logger.info('working directory %s has removed' % working_dir)
    with open_head_file('w', config=config) as f:
        f.write(commit)
    logger.info('new head: %s', commits[0])
def run_stock_parser():
    stock_symbols = []
    with open('symbols.txt', 'r') as symfile:
        for n, line in enumerate(symfile):
            sym = line.strip()
            if sym:
                stock_symbols.append(sym)

    ncpu = cpu_count()
    pool = GreenPool(ncpu * 4)

    stock_prices = []
    for symbol, price in pool.imap(read_stock_url, stock_symbols):
        stock_prices.append((symbol, price))

    with open('stock_prices.csv', 'w') as outfile:
        outfile.write('Stock,Price\n')
        for symbol, price in stock_prices:
            outfile.write('%s,%s\n' % (symbol, price))
def collect(self, project, **options):
    logger.debug(project)

    project = ProjectSchema().load(self.project_rpc.get(project))
    if project.language.lower() not in self.config['KEYWORDS']:
        raise LanguageNotSupported(f'{project.language} not supported')

    keywords = self.config['KEYWORDS'].get(project.language.lower())
    keywrd = Keywrd(keywords=keywords)

    commits = self.repository_rpc.get_commits(project.name)
    chunks = utilities.chunk(commits, size=round(len(commits) * 0.01))

    pool = GreenPool(os.cpu_count())
    arguments = [(project, c, self.repository_rpc) for c in chunks]
    keyword = list()
    for patches in pool.starmap(_get_patches, arguments):
        patches = PatchSchema(many=True).load(patches)
        keyword.extend(keywrd.get(patches))

    return KeywordSchema(many=True).dump(keyword)
def build_main(commits, config):
    logger = logging.getLogger(__name__ + '.build_main')
    logger.info('triggered with %d commits', len(commits))
    logger.debug('commits = %r', commits)
    token = get_token(config)
    pool = GreenPool()
    commit_map = dict(commits)
    commit_ids = [commit_id for commit_id, _ in commits]
    results = pool.imap(
        functools.partial(download_archive, token=token, config=config),
        commit_ids)
    env = make_virtualenv(config)
    save_dir = config['SAVE_DIRECTORY']
    complete_hook = config.get('COMPLETE_HOOK')
    for commit, filename in results:
        working_dir = extract(filename, save_dir)
        try:
            build = build_sphinx(working_dir, env)
        except Exception:
            permalink = commit_map[commit]
            if not config.get('RECREATE_VIRTUALENV'):
                env = make_virtualenv(config, recreate=True)
                try:
                    build = build_sphinx(working_dir, env)
                except Exception:
                    if callable(complete_hook):
                        complete_hook(commit, permalink, sys.exc_info())
                    continue
            if callable(complete_hook):
                complete_hook(commit, permalink, sys.exc_info())
            continue
        result_dir = os.path.join(save_dir, commit)
        shutil.move(build, result_dir)
        logger.info('build complete: %s' % result_dir)
        if callable(complete_hook):
            complete_hook(commit, commit_map[commit], None)
        shutil.rmtree(working_dir)
        logger.info('working directory %s has removed' % working_dir)
    with open_head_file('w', config=config) as f:
        f.write(commit)
    logger.info('new head: %s', commits[0])
def run(self, action_ref, parameters=None, count=10, concurrency=None):
    if not concurrency:
        concurrency = count

    pool = GreenPool(concurrency)
    client = Client()

    execution_ids = []

    def schedule_action(action_ref, parameters):
        execution = LiveAction()
        execution.action = action_ref
        execution.parameters = parameters
        execution = client.liveactions.create(execution)
        execution_ids.append(execution.id)

    start_timestamp = time.time()
    for index in range(0, count):
        pool.spawn(schedule_action, action_ref, parameters)

    pool.waitall()
    end_timestamp = time.time()
    delta = (end_timestamp - start_timestamp)

    print('Scheduled %s action executions in %ss.' % (count, delta))
def __init__(self, service_cls, config, worker_ctx_cls=None):
    self.service_cls = service_cls
    self.config = config

    if worker_ctx_cls is not None:
        warnings.warn(
            "The constructor of `ServiceContainer` has changed. "
            "The `worker_ctx_cls` kwarg is now deprecated. See CHANGES, "
            "version 2.4.0 for more details. This warning will be removed "
            "in version 2.6.0", DeprecationWarning
        )
    else:
        worker_ctx_cls = WorkerContext
    self.worker_ctx_cls = worker_ctx_cls

    self.service_name = get_service_name(service_cls)
    self.shared_extensions = {}

    self.max_workers = (
        config.get(MAX_WORKERS_CONFIG_KEY) or DEFAULT_MAX_WORKERS)

    self.serializer = config.get(
        SERIALIZER_CONFIG_KEY, DEFAULT_SERIALIZER)
    self.accept = [self.serializer]

    self.entrypoints = SpawningSet()
    self.dependencies = SpawningSet()
    self.subextensions = SpawningSet()

    for attr_name, dependency in inspect.getmembers(service_cls,
                                                    is_dependency):
        bound = dependency.bind(self.interface, attr_name)
        self.dependencies.add(bound)
        self.subextensions.update(iter_extensions(bound))

    for method_name, method in inspect.getmembers(service_cls, is_method):
        entrypoints = getattr(method, ENTRYPOINT_EXTENSIONS_ATTR, [])
        for entrypoint in entrypoints:
            bound = entrypoint.bind(self.interface, method_name)
            self.entrypoints.add(bound)
            self.subextensions.update(iter_extensions(bound))

    self.started = False
    self._worker_pool = GreenPool(size=self.max_workers)

    self._worker_threads = {}
    self._managed_threads = {}
    self._being_killed = False
    self._died = Event()
def run_once(self, *args, **kwargs):
    """
    Executes a single pass, looking for objects to expire.

    :param args: Extra args to fulfill the Daemon interface; this daemon
                 has no additional args.
    :param kwargs: Extra keyword args to fulfill the Daemon interface; this
                   daemon accepts processes and process keyword args.
                   These will override the values from the config file if
                   provided.
    """
    self.get_process_values(kwargs)
    pool = GreenPool(self.concurrency)
    containers_to_delete = set([])
    self.report_first_time = self.report_last_time = time()
    self.report_objects = 0
    try:
        self.logger.debug('Run begin')
        containers, objects = \
            self.swift.get_account_info(self.expiring_objects_account)
        self.logger.info(
            _('Pass beginning; '
              '%(containers)s possible containers; '
              '%(objects)s possible objects') % {
                'containers': containers,
                'objects': objects
            })

        for container, obj in self.iter_cont_objs_to_expire():
            containers_to_delete.add(container)

            if not obj:
                continue

            timestamp, actual_obj = obj.split('-', 1)
            timestamp = int(timestamp)
            if timestamp > int(time()):
                break
            pool.spawn_n(self.delete_object, actual_obj, timestamp,
                         container, obj)

        pool.waitall()
        for container in containers_to_delete:
            try:
                self.swift.delete_container(
                    self.expiring_objects_account,
                    container,
                    acceptable_statuses=(2, HTTP_NOT_FOUND, HTTP_CONFLICT))
            except (Exception, Timeout) as err:
                self.logger.exception(
                    _('Exception while deleting container %(container)s '
                      '%(err)s') % {
                        'container': container,
                        'err': str(err)
                    })
        self.logger.debug('Run end')
        self.report(final=True)
    except (Exception, Timeout):
        self.logger.exception(_('Unhandled exception'))
def run_once(self, *args, **kwargs):
    """
    Executes a single pass, looking for objects to expire.

    :param args: Extra args to fulfill the Daemon interface; this daemon
                 has no additional args.
    :param kwargs: Extra keyword args to fulfill the Daemon interface; this
                   daemon accepts processes and process keyword args.
                   These will override the values from the config file if
                   provided.
    """
    processes, process = self.get_process_values(kwargs)
    pool = GreenPool(self.concurrency)
    containers_to_delete = []
    self.report_first_time = self.report_last_time = time()
    self.report_objects = 0
    try:
        self.logger.debug(_('Run begin'))
        containers, objects = \
            self.swift.get_account_info(self.expiring_objects_account)
        self.logger.info(
            _('Pass beginning; %s possible containers; %s '
              'possible objects') % (containers, objects))
        for c in self.swift.iter_containers(self.expiring_objects_account):
            container = c['name']
            timestamp = int(container)
            if timestamp > int(time()):
                break
            containers_to_delete.append(container)
            for o in self.swift.iter_objects(self.expiring_objects_account,
                                             container):
                obj = o['name'].encode('utf8')
                if processes > 0:
                    obj_process = int(
                        hashlib.md5('%s/%s' % (container, obj)).hexdigest(),
                        16)
                    if obj_process % processes != process:
                        continue
                timestamp, actual_obj = obj.split('-', 1)
                timestamp = int(timestamp)
                if timestamp > int(time()):
                    break
                pool.spawn_n(self.delete_object, actual_obj, timestamp,
                             container, obj)
        pool.waitall()
        for container in containers_to_delete:
            try:
                self.swift.delete_container(
                    self.expiring_objects_account,
                    container,
                    acceptable_statuses=(2, HTTP_NOT_FOUND, HTTP_CONFLICT))
            except (Exception, Timeout) as err:
                self.logger.exception(
                    _('Exception while deleting container %s %s') %
                    (container, str(err)))
        self.logger.debug(_('Run end'))
        self.report(final=True)
    except (Exception, Timeout):
        self.logger.exception(_('Unhandled exception'))
def __init__(self, service_cls, worker_ctx_cls, config):
    self.service_cls = service_cls
    self.worker_ctx_cls = worker_ctx_cls
    self.service_name = get_service_name(service_cls)

    self.config = config
    self.max_workers = (config.get(MAX_WORKERS_CONFIG_KEY) or
                        DEFAULT_MAX_WORKERS)

    self.dependencies = DependencySet()
    for dep in prepare_dependencies(self):
        self.dependencies.add(dep)

    self.started = False
    self._worker_pool = GreenPool(size=self.max_workers)

    self._active_threads = {}
    self._protected_threads = set()
    self._being_killed = False
    self._died = Event()
def download(files, settings):
    temp_dir = settings['temp_dir']
    download_path = settings['download_path']
    download = DownloadPool(settings)
    decode = Decode()
    pool = GreenPool(settings['connections'])
    progress_tracker = greenthread.spawn(show_progress)

    for file_ in files:
        # Check if file from subject exists.
        subject_filename = helper.get_filename_from(file_['file_subject'])
        if helper.file_exists(download_path, subject_filename):
            Tracker.total_size -= sum([i['segment_bytes']
                                       for i in file_['segments']])
            print subject_filename, 'already exists'
            continue

        # Download.
        for segment_path in pool.imap(download.download, file_['segments']):
            # Decode.
            if segment_path:
                tpool.execute(decode.decode, segment_path, temp_dir,
                              download_path)

    if decode.tracker:
        print 'have broken files...'
        #return False
        print decode.tracker
        broken_files = decode.tracker.keys()
        for fname in broken_files:
            #print 'decoding', fname
            decode.join_files(fname, temp_dir, download_path)

    #progress_tracker.kill()
    progress_tracker.wait()

    # All OK.
    return 0
def __init__(self, service_cls_list, config):
    self.service_cls_list = service_cls_list
    self.config = config

    self.serializer, self.accept = serialization.setup(self.config)

    self.max_workers = (config.get(MAX_WORKERS_CONFIG_KEY) or
                        DEFAULT_MAX_WORKERS)

    self.shared_extensions = {}

    self.entrypoints = SpawningSet()
    self.dependencies = SpawningSet()
    self.subextensions = SpawningSet()

    self.started = False
    self._worker_pool = GreenPool(size=self.max_workers)

    self._worker_threads = {}
    self._managed_threads = {}
    self._being_killed = False
    self._died = Event()

    for service_cls in service_cls_list:
        self.service_cls = service_cls
        self.service_name = get_service_name(service_cls)

        for attr_name, dependency in inspect.getmembers(
                service_cls, is_dependency):
            bound = dependency.bind(self.interface, attr_name)
            self.dependencies.add(bound)
            self.subextensions.update(iter_extensions(bound))

        for method_name, method in inspect.getmembers(
                service_cls, is_method):
            entrypoints = getattr(method, ENTRYPOINT_EXTENSIONS_ATTR, [])
            for entrypoint in entrypoints:
                bound = entrypoint.bind(self.interface, method_name)
                self.entrypoints.add(bound)
                self.subextensions.update(iter_extensions(bound))
def run_once(self, *args, **kwargs):
    """
    Executes a single pass, looking for objects to expire.

    :param args: Extra args to fulfill the Daemon interface; this daemon
                 has no additional args.
    :param kwargs: Extra keyword args to fulfill the Daemon interface; this
                   daemon accepts processes and process keyword args.
                   These will override the values from the config file if
                   provided.
    """
    self.get_process_values(kwargs)
    pool = GreenPool(self.concurrency)
    self.report_first_time = self.report_last_time = time()
    self.report_objects = 0
    try:
        self.logger.debug('Run begin')
        containers, objects = \
            self.swift.get_account_info(self.expiring_objects_account)
        self.logger.info(
            _('Pass beginning; '
              '%(containers)s possible containers; '
              '%(objects)s possible objects') % {
                'containers': containers,
                'objects': objects
            })

        task_containers = list(self.iter_task_containers_to_expire())

        # delete_task_iter is a generator to yield a dict of
        # task_container, task_object, delete_timestamp, target_path
        # to handle delete actual object and pop the task from the queue.
        delete_task_iter = self.round_robin_order(
            self.iter_task_to_expire(task_containers))

        for delete_task in delete_task_iter:
            pool.spawn_n(self.delete_object, **delete_task)

        pool.waitall()
        for container in task_containers:
            try:
                self.swift.delete_container(
                    self.expiring_objects_account,
                    container,
                    acceptable_statuses=(2, HTTP_NOT_FOUND, HTTP_CONFLICT))
            except (Exception, Timeout) as err:
                self.logger.exception(
                    _('Exception while deleting container %(container)s '
                      '%(err)s') % {
                        'container': container,
                        'err': str(err)
                    })
        self.logger.debug('Run end')
        self.report(final=True)
    except (Exception, Timeout):
        self.logger.exception(_('Unhandled exception'))
def __init__(self, service_name=None, *args, **kwargs):
    """Load the specified in args, or flags."""
    # update_service_capabilities needs service_name to be volume
    super(CinderProxy, self).__init__(service_name='volume',
                                      *args, **kwargs)
    self.configuration = Configuration(volume_manager_opts,
                                       config_group=service_name)
    self._tp = GreenPool()
    self.volume_api = volume.API()
    self._last_info_volume_state_heal = 0
    self._change_since_time = None
    self.volumes_mapping_cache = {'volumes': {}, 'snapshots': {}}
    self._init_volume_mapping_cache()
    self.image_service = glance.get_default_image_service()
def test_high_client_load():
    test_context = {'clients': 0, 'services': 0}
    pool = GreenPool()
    pool.spawn(fake_service, "tcp://127.0.0.1:6801", test_context)
    for i in xrange(0, 10):
        pool.spawn(fake_client, "tcp://127.0.0.1:6800", "%s" % i,
                   test_context)
    pool.waitall()

    assert_equal(test_context['clients'], 10)
    assert_equal(test_context['services'], 100)
def __init__(self, service_cls, config, worker_ctx_cls=None):
    self.service_cls = service_cls
    self.config = config

    if worker_ctx_cls is None:
        worker_ctx_cls = WorkerContext
    self.worker_ctx_cls = worker_ctx_cls

    self.service_name = get_service_name(service_cls)
    self.shared_extensions = {}

    self.max_workers = (
        config.get(MAX_WORKERS_CONFIG_KEY) or DEFAULT_MAX_WORKERS)

    self.serializer = config.get(
        SERIALIZER_CONFIG_KEY, DEFAULT_SERIALIZER)
    self.accept = [self.serializer]

    self.entrypoints = SpawningSet()
    self.dependencies = SpawningSet()
    self.subextensions = SpawningSet()

    for attr_name, dependency in inspect.getmembers(service_cls,
                                                    is_dependency):
        bound = dependency.bind(self.interface, attr_name)
        self.dependencies.add(bound)
        self.subextensions.update(iter_extensions(bound))

    for method_name, method in inspect.getmembers(service_cls, is_method):
        entrypoints = getattr(method, ENTRYPOINT_EXTENSIONS_ATTR, [])
        for entrypoint in entrypoints:
            bound = entrypoint.bind(self.interface, method_name)
            self.entrypoints.add(bound)
            self.subextensions.update(iter_extensions(bound))

    self.started = False
    self._worker_pool = GreenPool(size=self.max_workers)

    self._active_threads = {}
    self._protected_threads = set()
    self._being_killed = False
    self._died = Event()
def __init__(
        self, controllercls, connection_factory, exchange, topic,
        pool=None, poolsize=1000):
    self.nodeid = UIDGEN()

    self.max_workers = poolsize
    if pool is None:
        self.procpool = GreenPool(size=poolsize)
    else:
        self.procpool = pool

    self.controller = controllercls()
    self.service = self.controller
    self.topic = topic
    self.greenlet = None
    self.consume_ready = Event()

    node_topic = "{}.{}".format(self.topic, self.nodeid)
    self.nova_queues = [
        entities.get_topic_queue(exchange, topic),
        entities.get_topic_queue(exchange, node_topic),
        entities.get_fanout_queue(topic),
    ]
    self._channel = None
    self._consumers = None

    self.connection = connection_factory()
    self.connection_factory = connection_factory

    inject_dependencies(self.controller, self)

    self._connection_pool = Pool(
        max_size=self.procpool.size,
        create=connection_factory
    )

    self.workers = set()
    self._pending_ack_messages = []
    self._pending_requeue_messages = []
    self._do_cancel_consumers = False
    self._consumers_cancelled = Event()

    self._timers = list(get_timers(self.controller))
def __init__(self, path=None, id=None, master=None, settings=None,
             log_level=logging.WARNING, baron=None, address=None):
    self.baron = baron
    self.path = os.path.abspath(path or '.')
    if id is None:
        id = platform.node()
    self.id = id
    if not os.path.exists(self.path):
        os.makedirs(path)
    self.log_level = log_level
    self.address = address
    self._socket = None
    self._started = False
    self._failed = False

    self.services = []
    self._service_map = {}
    self._deployments = []

    self._keys = set()          # A set of keys allowed to edit things.

    self._node_map = {id: self}
    self.master = master or self
    self.neighbors = []         # Any node we know about.
    self.vassals = []
    self.rogue = []             # TODO: Put nodes that should be vassals
                                # but don't recognize us here.

    self._pool = GreenPool()

    if (self.master != self):
        self._node_map[self.master.id] = self.master

    self.dispatcher = Dispatcher(self)
    self.load_settings(settings=settings)

    print "Sovereign node (%s) created at %s" % (self.id, self.path)
    print "", "- primary authentication key:", self.key
def run(self, count=100):
    pool = GreenPool(count)
    client = Client()

    rule_ids = []
    name_patterns = ['key1', 'key2', 'key3', 'key4', 'key5']

    def create_rule(rule):
        try:
            rule = client.rules.create(rule)
        except Exception as e:
            # Rule already exists, ignore the error
            print(e)
            return

        rule_ids.append(rule.id)

    start_timestamp = time.time()

    index_name_pattern = 0
    for index in range(0, count):
        rule = Rule()
        rule.name = 'rule_%s' % (index)
        rule.pack = 'default'
        rule.trigger = {'type': 'core.st2.key_value_pair.create'}

        # Use uniform distribution of names so if COUNT is 100, each key
        # will be used COUNT / len(KEYS)
        if index_name_pattern >= len(name_patterns):
            index_name_pattern = 0

        pattern = name_patterns[index_name_pattern]
        rule.criteria = {
            'trigger.object.name': {
                'pattern': (pattern),
                'type': 'equals'
            }
        }
        rule.action = {'ref': 'core.noop'}
        index_name_pattern += 1

        pool.spawn(create_rule, rule)

    pool.waitall()
    end_timestamp = time.time()
    delta = (end_timestamp - start_timestamp)

    print('Created %d rules in %ss.' % (count, delta))
def __init__(self, service_cls, worker_ctx_cls, config):
    self.service_cls = service_cls
    self.worker_ctx_cls = worker_ctx_cls
    self.service_name = get_service_name(service_cls)

    self.config = config
    self.max_workers = config.get(MAX_WORKERS_KEY) or DEFAULT_MAX_WORKERS

    self.dependencies = DependencySet()
    for dep in prepare_dependencies(self):
        self.dependencies.add(dep)

    self.started = False
    self._worker_pool = GreenPool(size=self.max_workers)

    self._active_threads = set()
    self._protected_threads = set()
    self._being_killed = False
    self._died = Event()
def __init__(self, controllercls, connection, exchange, topic,
             pool=None, poolsize=1000):
    self.nodeid = UIDGEN()

    if pool is None:
        self.procpool = GreenPool(size=poolsize)
    else:
        self.procpool = pool

    self.connection = connection
    self.controller = controllercls()
    self.topic = topic
    self.greenlet = None
    self.messagesem = Semaphore()
    self.consume_ready = Event()

    node_topic = "{}.{}".format(self.topic, self.nodeid)
    self.queues = [entities.get_topic_queue(exchange, topic),
                   entities.get_topic_queue(exchange, node_topic),
                   entities.get_fanout_queue(topic), ]
    self._channel = None
    self._consumers = None