def run(self, action_ref, parameters=None, count=10, concurrency=None):
        if not concurrency:
            concurrency = count

        pool = GreenPool(concurrency)
        client = Client()
        execution_ids = []

        def schedule_action(action_ref, parameters):
            execution = LiveAction()
            execution.action = action_ref
            execution.parameters = parameters

            execution = client.liveactions.create(execution)
            execution_ids.append(execution.id)

        start_timestamp = time.time()

        for index in range(0, count):
            pool.spawn(schedule_action, action_ref, parameters)

        pool.waitall()

        end_timestamp = time.time()
        delta = (end_timestamp - start_timestamp)

        print('Scheduled %s action executions in %ss.' % (count, delta))
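
The snippets in this collection all follow the same core GreenPool pattern: create a pool with a concurrency cap, spawn() one green thread per task, and waitall() before reading the results. A minimal, self-contained sketch of that pattern follows; the work() function is a hypothetical stand-in for the API calls made above.

import time

import eventlet
from eventlet import GreenPool


def work(index):
    # Stand-in for an I/O-bound task such as an API call
    eventlet.sleep(0.1)
    return index


def run_batch(count=10, concurrency=None):
    pool = GreenPool(concurrency or count)

    start = time.time()
    for index in range(count):
        pool.spawn(work, index)
    pool.waitall()

    print('Ran %s tasks in %ss.' % (count, time.time() - start))


if __name__ == '__main__':
    run_batch()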
Example #2
def test_high_client_load():
    eventlet.spawn_n(fake_service, "tcp://127.0.0.1:6805")
    clients = GreenPool()

    for i in xrange(0, 100):
        clients.spawn(fake_client, "tcp://127.0.0.1:6804",
                      "%s:%s" % (os.getpid(), i))

    clients.waitall()
Example #3
def test_high_workload():
    # fire up three services to receive in roundrobin style, giving
    # each an ident so we can make sure they're working that way
    eventlet.spawn_n(fake_service, "tcp://127.0.0.1:6900", 1)
    eventlet.spawn_n(fake_service, "tcp://127.0.0.1:6900", 2)
    eventlet.spawn_n(fake_service, "tcp://127.0.0.1:6900", 3)

    clients = GreenPool()

    # fire up a bunch of clients to thrash it at random
    for i in xrange(0, 100):
        clients.spawn(fake_client, "tcp://127.0.0.1:6802", "%s:%s" % (os.getpid(), i))

    clients.waitall()
Example #4
    def run(self, count=100):
        pool = GreenPool(count)
        client = Client()
        rule_ids = []

        name_patterns = ['key1', 'key2', 'key3', 'key4', 'key5']

        def create_rule(rule):
            try:
                rule = client.rules.create(rule)
            except Exception as e:
                # Rule already exists, ignore the error
                print(e)
                return

            rule_ids.append(rule.id)

        start_timestamp = time.time()

        index_name_pattern = 0
        for index in range(0, count):
            rule = Rule()
            rule.name = 'rule_%s' % (index)
            rule.pack = 'default'
            rule.trigger = {'type': 'core.st2.key_value_pair.create'}

            # Use a uniform distribution of names so that if COUNT is 100,
            # each key will be used COUNT / len(KEYS) times
            if index_name_pattern >= len(name_patterns):
                index_name_pattern = 0

            pattern = name_patterns[index_name_pattern]
            rule.criteria = {
                'trigger.object.name': {
                    'pattern': (pattern),
                    'type': 'equals'
                }
            }
            rule.action = {'ref': 'core.noop'}
            index_name_pattern += 1

            pool.spawn(create_rule, rule)

        pool.waitall()

        end_timestamp = time.time()
        delta = (end_timestamp - start_timestamp)

        print('Created %d rules in %ss.' % (count, delta))
Example #5
def test_high_client_load():
    test_context = {'clients': 0, 'services': 0}
    pool = GreenPool()

    pool.spawn(fake_service,
               "tcp://127.0.0.1:6801", test_context)

    for i in xrange(0, 10):
        pool.spawn(fake_client, "tcp://127.0.0.1:6800",
                   "%s" % i, test_context)

    pool.waitall()

    assert_equal(test_context['clients'], 10)
    assert_equal(test_context['services'], 100)
Example #6
def get_all_highest_during_previous_days(days, result):
    stock_codes = get_stock_codes()
    gp = GreenPool()
    today = datetime.now().strftime("%Y-%m-%d")
    filename = "%s_highest_%s_days.json" % (today, days)
    if os.path.exists(filename):
        with open(filename) as f:
            result.update(json.load(f))
            return
    for stock in stock_codes:
        gp.spawn(
            eventlet_handle,
            functools.partial(get_edge_during_previous_days,
                              coloumn='high',
                              days=days,
                              highest=True), stock, result)
    gp.waitall()
    with open(filename, 'w') as f:
        json.dump(result, f, indent=2)
Example #7
class AsyncMixin(object):
    def __init__(self):
        self.pool = GreenPool()

    def publish_async(self, event, callback, *args, **kwargs):
        return self.pool.spawn(self.__publish_async, event, callback, *args,
                               **kwargs)

    def __publish_async(self, event, callback, *args, **kwargs):
        if event not in self.listeners:
            return
        for listener in self.listeners[event]:
            self.pool.spawn(self.__worker(listener, callback), *args, **kwargs)

    def __worker(self, listener, callback):
        def worker(*args, **kwargs):
            callback(listener(*args, **kwargs))

        return worker
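
A hedged usage sketch for AsyncMixin follows. The mixin assumes the host class defines a listeners mapping of event name to callables; the Emitter class, its listener, and the on_result callback below are hypothetical.

class Emitter(AsyncMixin):
    def __init__(self):
        super(Emitter, self).__init__()
        # event name -> list of listener callables (assumed by the mixin)
        self.listeners = {'greet': [lambda name: 'hello %s' % name]}


def on_result(value):
    print(value)


emitter = Emitter()
emitter.publish_async('greet', on_result, 'world')
emitter.pool.waitall()  # wait for the listener and callback green threads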
Example #8
    def _parallel_execute(self, operation, *args):
        """
        Perform an operation against all fabrics and consolidate the
        responses into a dictionary keyed on fabric name.
        """

        def _spawn(context, operation, fabric_name, conn, *args):
            # Inherit this thread's context from the parent
            context.update_store()

            @lockutils.synchronized(fabric_name, 'fcfabric-', True)
            def _locked_spawn(operation, fabric_name, conn, *args):
                return operation(fabric_name, conn, *args)

            return _locked_spawn(operation, fabric_name, conn, *args)

        pool = GreenPool(size=len(self.fabrics))

        # Obtain our current context so that we can make sure that our child
        # threads have the same context, so that we can correlate log messages
        # that they generate.
        context = getattr(local.store, 'context', None)
        threads = {}
        for fabric_name, conn in self.fabrics.iteritems():
            thread = pool.spawn(_spawn, context, operation, fabric_name, conn,
                                *args)
            threads[fabric_name] = thread

        # Collect the responses.  This may raise exceptions when we call wait()
        # If they do, we collect them and raise a collective exception at the
        # end.
        responses = {}
        exceptions = []

        for fabric_name, thread in threads.iteritems():
            try:
                responses[fabric_name] = thread.wait()
            except Exception as e:
                # FabricExceptions can indicate that a backtrace is not
                # required if they contain sufficient debug information
                # themselves.
                if (not isinstance(e, exception.FabricException) or
                        e.backtrace_needed):
                    LOG.exception(e)
                exceptions.append(e)

        # If any exceptions were raised, we throw an exception that
        # encapsulates them all.
        if exceptions:
            raise exception.ZoneManagerParallel(exceptions)

        return responses
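
The fan-out/collect idiom above relies on spawn() returning a GreenThread whose wait() re-raises any exception from the task. Below is a standalone sketch of the same idiom, with a hypothetical query() task and fabric names in place of the real operation.

from eventlet import GreenPool


def query(name):
    if name == 'bad':
        raise RuntimeError('fabric %s unreachable' % name)
    return 'ok:%s' % name


pool = GreenPool(size=3)
threads = dict((name, pool.spawn(query, name)) for name in ('a', 'b', 'bad'))

responses = {}
errors = []
for name, thread in threads.items():
    try:
        responses[name] = thread.wait()
    except Exception as exc:
        errors.append((name, exc))

print(responses)
print(errors)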
Example #9
    for service in services.getElementsByTagName('service'):

        service_name = service.getElementsByTagName('name')[0].firstChild.nodeValue
#         host='localhost', exchange='metadata', service_type='call', routing_key='meta_queue', on_request_name = 'on_request_metadata'
        service_params = {}
        service_params['service_type'] = service.attributes['type'].firstChild.nodeValue
        service_params['host'] = service.getElementsByTagName('host')[0].firstChild.nodeValue
        service_params['exchange'] = service.getElementsByTagName('params')[0].getElementsByTagName('exchange')[0].firstChild.nodeValue
        service_params['routing_key'] = service.getElementsByTagName('params')[0].getElementsByTagName('queue')[0].firstChild.nodeValue
        service_params['on_request_name'] = service.getElementsByTagName('params')[0].getElementsByTagName('callback')[0].firstChild.nodeValue
        
        services_dict[service_name] = service_params
        
    return services_dict


# For test
if __name__ == '__main__':

    pool = GreenPool()

    services = get_services()
    service = sys.argv[1]

    if service in services:
        # Defer start() to the pool instead of calling it inline, then wait
        pool.spawn(Worker().start, **services.get(service, {}))
        pool.waitall()
    else:
        print 'no such service'

Example #10
def entrance():
    pool = GreenPool(100)
    for x in range(10):
        pool.spawn(fetcher)
    pool.waitall()
Example #11
class ServiceContainer(object):
    def __init__(self, service_cls, worker_ctx_cls, config):

        self.service_cls = service_cls
        self.worker_ctx_cls = worker_ctx_cls

        self.service_name = get_service_name(service_cls)

        self.config = config
        self.max_workers = (config.get(MAX_WORKERS_CONFIG_KEY)
                            or DEFAULT_MAX_WORKERS)

        self.dependencies = DependencySet()
        for dep in prepare_dependencies(self):
            self.dependencies.add(dep)

        self.started = False
        self._worker_pool = GreenPool(size=self.max_workers)

        self._active_threads = {}
        self._protected_threads = set()
        self._being_killed = False
        self._died = Event()

    @property
    def entrypoints(self):
        return filter(is_entrypoint_provider, self.dependencies)

    @property
    def injections(self):
        return filter(is_injection_provider, self.dependencies)

    def start(self):
        """ Start a container by starting all the dependency providers.
        """
        _log.debug('starting %s', self)
        self.started = True

        with _log_time('started %s', self):
            self.dependencies.all.prepare()
            self.dependencies.all.start()

    def stop(self):
        """ Stop the container gracefully.

        First all entrypoints are asked to ``stop()``.
        This ensures that no new worker threads are started.

        It is the providers' responsibility to gracefully shut down when
        ``stop()`` is called on them and only return when they have stopped.

        After all entrypoints have stopped the container waits for any
        active workers to complete.

        After all active workers have stopped the container stops all
        injections.

        At this point there should be no more managed threads. In case there
        are any managed threads, they are killed by the container.
        """
        if self._died.ready():
            _log.debug('already stopped %s', self)
            return

        if self._being_killed:
            # this race condition can happen when a container is hosted by a
            # runner and yields during its kill method; if it's unlucky in
            # scheduling the runner will try to stop() it before self._died
            # has a result
            _log.debug('already being killed %s', self)
            try:
                self._died.wait()
            except:
                pass  # don't re-raise if we died with an exception
            return

        _log.debug('stopping %s', self)

        with _log_time('stopped %s', self):
            dependencies = self.dependencies

            # entrypoint deps have to be stopped before injection deps
            # to ensure that running workers can successfully complete
            dependencies.entrypoints.all.stop()

            # there might still be some running workers, which we have to
            # wait for to complete before we can stop injection dependencies
            self._worker_pool.waitall()

            # it should be safe now to stop any injection as there is no
            # active worker which could be using it
            dependencies.injections.all.stop()

            # finally, stop nested dependencies
            dependencies.nested.all.stop()

            # just in case there was a provider not taking care of its workers,
            # or a dependency not taking care of its protected threads
            self._kill_active_threads()
            self._kill_protected_threads()

            self.started = False
            self._died.send(None)

    def kill(self, exc_info=None):
        """ Kill the container in a semi-graceful way.

        All non-protected managed threads are killed first. This includes
        all active workers generated by :meth:`ServiceContainer.spawn_worker`.
        Next, dependencies are killed. Finally, any remaining protected threads
        are killed.

        If ``exc_info`` is provided, the exception will be raised by
        :meth:`wait`.
        """
        if self._being_killed:
            # this happens if a managed thread exits with an exception
            # while the container is being killed or if multiple errors
            # happen simultaneously
            _log.debug('already killing %s ... waiting for death', self)
            try:
                self._died.wait()
            except:
                pass  # don't re-raise if we died with an exception
            return

        self._being_killed = True

        if self._died.ready():
            _log.debug('already stopped %s', self)
            return

        if exc_info is not None:
            _log.info('killing %s due to %s', self, exc_info[1])
        else:
            _log.info('killing %s', self)

        # protect against dependencies that throw during kill; the container
        # is already dying with an exception, so ignore anything else
        def safely_kill_dependencies(dep_set):
            try:
                dep_set.kill()
            except Exception as exc:
                _log.warning('Dependency raised `%s` during kill', exc)

        safely_kill_dependencies(self.dependencies.entrypoints.all)
        self._kill_active_threads()
        safely_kill_dependencies(self.dependencies.all)
        self._kill_protected_threads()

        self.started = False
        self._died.send(None, exc_info)

    def wait(self):
        """ Block until the container has been stopped.

        If the container was stopped due to an exception, ``wait()`` will
        raise it.

        Any unhandled exception raised in a managed thread or in the
        life-cycle management code also causes the container to be
        ``kill()``ed, which causes an exception to be raised from ``wait()``.
        """
        return self._died.wait()

    def spawn_worker(self,
                     provider,
                     args,
                     kwargs,
                     context_data=None,
                     handle_result=None):
        """ Spawn a worker thread for running the service method decorated
        with an entrypoint ``provider``.

        ``args`` and ``kwargs`` are used as arguments for the service
        method.

        ``context_data`` is used to initialize a ``WorkerContext``.

        ``handle_result`` is an optional function which may be passed
        in by the entrypoint provider. It is called with the result returned
        or error raised by the service method. If provided it must return a
        value for ``result`` and ``exc_info`` to propagate to dependencies;
        these may be different to those returned by the service method.
        """

        if self._being_killed:
            _log.info("Worker spawn prevented due to being killed")
            raise ContainerBeingKilled()

        service = self.service_cls()
        worker_ctx = self.worker_ctx_cls(self,
                                         service,
                                         provider,
                                         args,
                                         kwargs,
                                         data=context_data)

        _log.debug('spawning %s', worker_ctx)
        gt = self._worker_pool.spawn(self._run_worker, worker_ctx,
                                     handle_result)
        self._active_threads[gt] = provider
        gt.link(self._handle_thread_exited)
        return worker_ctx

    def spawn_managed_thread(self, run_method, protected=False):
        """ Spawn a managed thread to run ``run_method``.

        Threads can be marked as ``protected``, which means the container will
        not forcibly kill them until after all dependencies have been killed.
        Dependencies that require a managed thread to complete their kill
        procedure should ensure to mark them as ``protected``.

        Any uncaught errors inside ``run_method`` cause the container to be
        killed.

        It is the caller's responsibility to terminate their spawned threads.
        Threads are killed automatically if they are still running after
        all dependencies are stopped during :meth:`ServiceContainer.stop`.

        Entrypoints may only create separate threads using this method,
        to ensure they are life-cycle managed.
        """
        gt = eventlet.spawn(run_method)
        if not protected:
            self._active_threads[gt] = MANAGED_THREAD
        else:
            self._protected_threads.add(gt)
        gt.link(self._handle_thread_exited)
        return gt

    def _run_worker(self, worker_ctx, handle_result):
        _log.debug('setting up %s', worker_ctx)

        if not worker_ctx.parent_call_stack:
            _log.debug('starting call chain')
        _log.debug('call stack for %s: %s', worker_ctx,
                   '->'.join(worker_ctx.call_id_stack))

        with _log_time('ran worker %s', worker_ctx):

            self.dependencies.injections.all.inject(worker_ctx)
            self.dependencies.all.worker_setup(worker_ctx)

            result = exc_info = None
            method = getattr(worker_ctx.service, worker_ctx.provider.name)
            try:

                _log.debug('calling handler for %s', worker_ctx)

                with _log_time('ran handler for %s', worker_ctx):
                    result = method(*worker_ctx.args, **worker_ctx.kwargs)
            except Exception as exc:
                _log.debug('error handling worker %s: %s',
                           worker_ctx,
                           exc,
                           exc_info=True)
                exc_info = sys.exc_info()

            if handle_result is not None:
                _log.debug('handling result for %s', worker_ctx)

                with _log_time('handled result for %s', worker_ctx):
                    result, exc_info = handle_result(worker_ctx, result,
                                                     exc_info)

            with _log_time('tore down worker %s', worker_ctx):

                _log.debug('signalling result for %s', worker_ctx)
                self.dependencies.injections.all.worker_result(
                    worker_ctx, result, exc_info)

                # we don't need this any more, and breaking the cycle means
                # this can be reclaimed immediately, rather than waiting for a
                # gc sweep
                del exc_info

                self.dependencies.all.worker_teardown(worker_ctx)
                self.dependencies.injections.all.release(worker_ctx)

    def _kill_active_threads(self):
        """ Kill all managed threads that were not marked as "protected" when
        they were spawned.

        This set will include all worker threads generated by
        :meth:`ServiceContainer.spawn_worker`.

        See :meth:`ServiceContainer.spawn_managed_thread`
        """
        num_active_threads = len(self._active_threads)

        if num_active_threads:
            _log.warning('killing %s active thread(s)', num_active_threads)
            for gt, provider in list(self._active_threads.items()):
                if provider is not MANAGED_THREAD:
                    description = '{}.{}'.format(self.service_name,
                                                 provider.name)
                    _log.warning('killing active thread for %s', description)
                gt.kill()

    def _kill_protected_threads(self):
        """ Kill any managed threads marked as protected when they were
        spawned.

        See :meth:`ServiceContainer.spawn_managed_thread`
        """
        num_protected_threads = len(self._protected_threads)

        if num_protected_threads:
            _log.warning('killing %s protected thread(s)',
                         num_protected_threads)
            for gt in list(self._protected_threads):
                gt.kill()

    def _handle_thread_exited(self, gt):
        self._active_threads.pop(gt, None)
        self._protected_threads.discard(gt)

        try:
            gt.wait()

        except GreenletExit:
            # we don't care much about threads killed by the container
            # this can happen in stop() and kill() if providers
            # don't properly take care of their threads
            _log.warning('%s thread killed by container', self)

        except Exception:
            _log.error('%s thread exited with error', self, exc_info=True)
            # any error raised inside an active thread is unexpected behavior
            # and probably a bug in the providers or container.
            # to be safe we call self.kill() to kill our dependencies and
            # provide the exception info to be raised in self.wait().
            self.kill(sys.exc_info())

    def __repr__(self):
        service_name = repr_safe_str(self.service_name)
        return '<ServiceContainer [{}] at 0x{:x}>'.format(
            service_name, id(self))
Example #12
class AsynchronousSection(object):
    """Allows calling function asynchronously with waiting on exit."""

    MIN_POOL_SIZE = 1

    def __init__(self, size=0, ignore_errors_num=0):
        """Initialises.

        :param size: the max number of parallel tasks
        :param ignore_errors_num:
               the number of errors that will not stop the execution
        """

        self.executor = GreenPool(max(size, self.MIN_POOL_SIZE))
        self.ignore_errors_num = ignore_errors_num
        self.errors = []
        self.tasks = set()

    def __enter__(self):
        self.errors[:] = []
        return self

    def __exit__(self, etype, *_):
        self.wait(etype is not None)

    def execute(self, func, *args, **kwargs):
        """Calls function asynchronously."""
        if 0 <= self.ignore_errors_num < len(self.errors):
            raise RuntimeError("Too many errors.")

        gt = self.executor.spawn(func, *args, **kwargs)
        self.tasks.add(gt)
        gt.link(self.on_complete)
        return gt

    def on_complete(self, gt):
        """Callback to handle task completion."""

        try:
            gt.wait()
        except Exception as e:
            logger.error("Task failed: %s", six.text_type(e))
            self.errors.append(sys.exc_info())
        finally:
            self.tasks.discard(gt)

    def wait(self, ignore_errors=False):
        """Waits until all tasks will be completed.

        Do not use directly, will be called from context manager.
        """
        self.executor.waitall()
        if len(self.errors) > 0:
            for exc_info in self.errors:
                logger.exception("error details.", exc_info=exc_info)

            self.errors[:] = []
            if not ignore_errors:
                raise RuntimeError(
                    "Operations completed with errors.\n"
                    "See log for more details."
                )
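
A hedged usage sketch for AsynchronousSection as a context manager: tasks run concurrently on the pool and wait() is invoked when the block exits, raising RuntimeError if any task failed, while ignore_errors_num controls how many failures execute() tolerates before refusing to schedule more work. The upload() task and package names are hypothetical.

def upload(package):
    print('uploading %s' % package)


with AsynchronousSection(size=4, ignore_errors_num=0) as section:
    for package in ('pkg-a', 'pkg-b', 'pkg-c'):
        section.execute(upload, package)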
Example #13
class LocalNode(Node):
    def __init__(self, path=None, id=None, master=None, settings=None, log_level=logging.WARNING, baron=None, address=None):
        self.baron = baron
        self.path = os.path.abspath(path or '.')
        
        if id is None:
            id = platform.node()
        
        self.id = id
        
        if not os.path.exists(self.path):
            os.makedirs(self.path)
        
        self.log_level = log_level
        
        self.address = address
        self._socket = None
        
        self._started = False
        self._failed = False
        
        self.services = []
        self._service_map = {}
        self._deployments = []
        self._keys = set()      # A set of keys allowed to edit things.
        
        self._node_map = {id: self}
        self.master = master or self
        self.neighbors = []     # Any node we know about.
        self.vassals = []
        self.rogue = []         # TODO: Put nodes that should be vassals 
                                # but don't recognize us here.
                                
        self._pool = GreenPool()
        
        if (self.master != self):
            self._node_map[self.master.id] = self.master
        
        self.dispatcher = Dispatcher(self)
        
        self.load_settings(settings=settings)
        
        print "Sovereign node (%s) created at %s" % (self.id, self.path)
        print "", "- primary authentication key:", self.key
    
    def serve(self, address=None):
        """
            Serves the rest client at *address*.
        """
        if self._socket:
            self.close()
        
        try:
            self._socket = self.build_socket(address or self.address)
            self.address = self._socket.getsockname()
            
            self.start()
            self._started = True
            
            print "listening on http://%s:%s" % self.address
            wsgi.server(self._socket, self, log=FileLikeLogger(logging))
            self._socket = None
        except Exception:
            self._failed = True
            logging.exception("Error binding address.")
            raise
        finally:
            self.close()
    
    def nanny(self):
        """
        Waits for the node to start and returns True if it succeeds.
        Useful for testing.
        """
        while not self._started and not self._failed:
            eventlet.sleep(.01)
        return not self._failed
            
    def spawn_thread(self, func, *args, **kwargs):
        thread = self._pool.spawn(func, *args, **kwargs)
        eventlet.sleep(0)
        return thread
    
    def start(self):
        for service in self.services:
            if not service.started and not service.disabled:
                service.deploy()
        
    def stop(self):
        for service in self.services:
            if service.started:
                service.stop()
    
    def close(self):
        if self._socket:
            try:
                self._socket.close()
            except:
                logging.exception("Socket will not shutdown.")
                pass
        self._socket = None
        self.address = None
    
    def __call__(self, env, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        response = self.route(env)
        if (response is None):
            response = http.NotFound()
        return response(env, start_response)
    
    def authorize(self, env):
        auth = env.get('HTTP_AUTHORIZATION', None)
        if not auth:
            return False
        return auth in self._keys
    
    def add_key(self, key=None):
        if key is None:
            key = random_str()
        self._keys.add(key)
        return key
    
    def rem_key(self, key):
        self._keys.remove(key)
    
    def route(self, env):
        try:
            for service in self.services:
                response = service.route(env)
                if response is not None:
                    return response
        
            return self.dispatcher.route(env)
        except:
            logging.exception("Error handling request.")
            raise
    
    def build_socket(self, address, reusable=True, listen=500):
        if (self.baron):
            return self.baron.create_socket(address, reusable, listen)
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if reusable:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind(tuple(address))
        sock.listen(listen)
        return sock
    
    def load_settings(self, settings=None):
        path = os.path.join(self.path, 'settings.json')
        if settings is False or (not os.path.exists(path) and not settings):
            settings = {
                'services': [{
                    'id': 'admin',
                    'type': 'sovereign.contrib.admin.Service',
                }]
            }
        elif not settings:
            settings = json.load(open(path, 'r'))
        
        self.key = settings.get('key', random_str())
        self._keys = set([self.key])
        
        if (self.address is None):
            self.address = settings.get('address', ('0.0.0.0', 1648))
        
        for service in settings.get('services', ()):
            self.create_service(service.get('id', 'type'), service, deploy=False)
    
    def save_settings(self):
        path = os.path.join(self.path, 'settings.json')
        
        settings = {
            'id': self.id,
            'key': self.key,
            'address': self.address,
            'services': [service.settings.flat('user') for service in self.services]
        }
        if self.master is not self:
            settings['master'] = self.master.address
        
        json.dump(settings, open(path, 'w'))
    
    ### Implementations ###
    def sys_install(self, packages):
        from system import sys_install
        return sys_install(packages)
    
    def sys_upgrade(self, packages):
        from system import sys_upgrade
        return sys_upgrade(packages)
        
    def sys_uninsall(self, packages):
        from system import sys_uninsall
        return sys_uninsall(packages)
    
    def pip_install(self, packages, env=None):
        if (env):
            cmd = "pip -E %s install %%s" % env
        else:
            cmd = "pip install %s"
        
        return self.sys(cmd % p for p in packages)
    
    def pip_uninstall(self, packages, env=None):
        if (env):
            cmd = "pip -E %s uninstall %%s" % env
        else:
            cmd = "pip uninstall %s"
        
        return self.sys(cmd % p for p in packages)
    
    def sys(self, cmds):
        stdout = []
        stderr = []
        for cmd in cmds:
            o, e, returncode = shell(self.path, cmd)
            stdout.append(o)
            stderr.append(e)
        return stdout, stderr
    
    def info(self):
        info = {
            'id': self.id,
            'address': self.address,
            'services': [service.info() for service in self.services],
            'vassals': [node.info() for node in self.vassals],
        }
        if self.master is not self:
            info['master'] = self.master.address
        return info
    
    def create_service(self, id, settings, deploy=True):
        if self._get_service(id):
            raise ServiceIdCollision("Cannot create another service with the same id: %r" % id)
        
        ServiceCls = get_service_class(settings['type'])
        service = ServiceCls(node=self, id=id, settings=settings)
        self.services.append(service)
        self._service_map[id] = service
        
        if deploy:
            if not service.settings.get('disabled', False):
                service.deploy()
        
        self.save_settings()
        return service.info()
        
    def modify_service(self, id, settings):
        service = self._get_service(id)
        if not service:
            return self.create_service(id, settings)
        else:
            service.stop()
            service.settings.update(settings)
            service.deploy()
            return service.info()
    
    def delete_service(self, id):
        service = self._get_service(id)
        if (not service):
            return True
        service.delete()
        self.services.remove(service)
        self._service_map.pop(id)
        self.save_settings()
        return True
    
    def msg_service(self, id, message, **kwargs):
        service = self._get_service(id)
        return service.msg(message, **kwargs)
    
    def get_service(self, id):
        service = self._get_service(id)
        if (service):
            return service.info()
        return None
    
    def _get_service(self, id):
        return self._service_map.get(id, None)
    
    def get_service_log(self, id, since=None):
        service = self._get_service(id)
        if service:
            if since:
                return tuple( reversed(tuple(service._log.get_lines_since(since))) )
            else:
                return service._log.get_lines()
        else:
            return None
    
    def create_local_node(self, id=None, path=None, settings=None):
        if self.get_node(id):
            raise NodeIdCollision("Cannot create a node with the id of another, known node: %r" % id)
        
        settings['type'] = 'local'
        node = LocalNode(path=path, id=id, master=self, settings=settings)
        self.spawn_thread(node.serve, settings['address'])
        self._node_map[id] = node
        self.vassals.append(node)
        return node
    
    def delete_node(self, id):
        node = self.get_node(id)
        if not node:
            return True
        if node not in self.vassals:
            return False
        node.terminate()
        self.vassals.remove(node)
        del self._node_map[node.id]
        return True
    
    def get_node(self, id):
        return self._node_map.get(id, None)
    
    def identify_nodes(self, nodes):
        raise NotImplementedError()
    
    def file_operations(self, operations):
        raise NotImplementedError()
    
    def recognize(self, node):
        self.master = node
        logging.info("We recognize: %r" % (node.address,))

    def _terminate(self, recurse):
        logging.info("Sovereign server terminating...")
        eventlet.sleep(.01)
        if recurse:
            for vassal in self.vassals:
                vassal.terminate()
                del self._node_map[vassal.id]
        if (self._socket):
            self.close()
            sys.exit(0)

    def terminate(self, recurse=True):
        self.stop()
        if (self._socket):
            eventlet.spawn_n(self._terminate, recurse)
            return True
        return False
Example #14
class ServiceContainer(object):

    def __init__(self, service_cls, worker_ctx_cls, config):

        self.service_cls = service_cls
        self.worker_ctx_cls = worker_ctx_cls

        self.service_name = get_service_name(service_cls)

        self.config = config
        self.max_workers = config.get(MAX_WORKERS_KEY) or DEFAULT_MAX_WORKERS

        self.dependencies = DependencySet()
        for dep in prepare_dependencies(self):
            self.dependencies.add(dep)

        self.started = False
        self._worker_pool = GreenPool(size=self.max_workers)

        self._active_threads = set()
        self._protected_threads = set()
        self._being_killed = False
        self._died = Event()

    @property
    def entrypoints(self):
        return filter(is_entrypoint_provider, self.dependencies)

    @property
    def injections(self):
        return filter(is_injection_provider, self.dependencies)

    def start(self):
        """ Start a container by starting all the dependency providers.
        """
        _log.debug('starting %s', self)
        self.started = True

        with log_time(_log.debug, 'started %s in %0.3f sec', self):
            self.dependencies.all.prepare()
            self.dependencies.all.start()

    def stop(self):
        """ Stop the container gracefully.

        First all entrypoints are asked to ``stop()``.
        This ensures that no new worker threads are started.

        It is the providers' responsibility to gracefully shut down when
        ``stop()`` is called on them and only return when they have stopped.

        After all entrypoints have stopped the container waits for any
        active workers to complete.

        After all active workers have stopped the container stops all
        injections.

        At this point there should be no more managed threads. In case there
        are any managed threads, they are killed by the container.
        """
        if self._died.ready():
            _log.debug('already stopped %s', self)
            return

        _log.debug('stopping %s', self)

        with log_time(_log.debug, 'stopped %s in %0.3f sec', self):
            dependencies = self.dependencies

            # entrypoint deps have to be stopped before injection deps
            # to ensure that running workers can successfully complete
            dependencies.entrypoints.all.stop()

            # there might still be some running workers, which we have to
            # wait for to complete before we can stop injection dependencies
            self._worker_pool.waitall()

            # it should be safe now to stop any injection as there is no
            # active worker which could be using it
            dependencies.injections.all.stop()

            # finally, stop nested dependencies
            dependencies.nested.all.stop()

            # just in case there was a provider not taking care of its workers,
            # or a dependency not taking care of its protected threads
            self._kill_active_threads()
            self._kill_protected_threads()

            self.started = False
            self._died.send(None)

    def kill(self, exc):
        """ Kill the container in a semi-graceful way.

        All non-protected managed threads are killed first. This includes
        all active workers generated by :meth:`ServiceContainer.spawn_worker`.
        Next, dependencies are killed. Finally, any remaining protected threads
        are killed.

        The container dies with the given ``exc``.
        """
        if self._being_killed:
            # this happens if a managed thread exits with an exception
            # while the container is being killed or another caller
            # behaves in a similar manner
            _log.debug('already killing %s ... waiting for death', self)
            self._died.wait()

        self._being_killed = True

        if self._died.ready():
            _log.debug('already stopped %s', self)
            return

        _log.info('killing %s due to "%s"', self, exc)

        self.dependencies.entrypoints.all.kill(exc)
        self._kill_active_threads()
        self.dependencies.all.kill(exc)
        self._kill_protected_threads()

        self.started = False
        self._died.send_exception(exc)

    def wait(self):
        """ Block until the container has been stopped.

        If the container was stopped using ``kill(exc)``,
        ``wait()`` raises ``exc``.
        Any unhandled exception raised in a managed thread or in the
        life-cycle management code also causes the container to be
        ``kill()``ed, which causes an exception to be raised from ``wait()``.
        """
        return self._died.wait()

    def spawn_worker(self, provider, args, kwargs,
                     context_data=None, handle_result=None):
        """ Spawn a worker thread for running the service method decorated
        with an entrypoint ``provider``.

        ``args`` and ``kwargs`` are used as arguments for the service
        method.

        ``context_data`` is used to initialize a ``WorkerContext``.

        ``handle_result`` is an optional callback which may be passed
        in by the calling entrypoint provider. It is called with the
        result returned or error raised by the service method.
        """
        service = self.service_cls()
        worker_ctx = self.worker_ctx_cls(
            self, service, provider.name, args, kwargs, data=context_data)

        _log.debug('spawning %s', worker_ctx,
                   extra=worker_ctx.extra_for_logging)
        gt = self._worker_pool.spawn(self._run_worker, worker_ctx,
                                     handle_result)
        self._active_threads.add(gt)
        gt.link(self._handle_thread_exited)
        return worker_ctx

    def spawn_managed_thread(self, run_method, protected=False):
        """ Spawn a managed thread to run ``run_method``.

        Threads can be marked as ``protected``, which means the container will
        not forcibly kill them until after all dependencies have been killed.
        Dependencies that require a managed thread to complete their kill
        procedure should ensure to mark them as ``protected``.

        Any uncaught errors inside ``run_method`` cause the container to be
        killed.

        It is the caller's responsibility to terminate their spawned threads.
        Threads are killed automatically if they are still running after
        all dependencies are stopped during :meth:`ServiceContainer.stop`.

        Entrypoints may only create separate threads using this method,
        to ensure they are life-cycle managed.
        """
        gt = eventlet.spawn(run_method)
        if not protected:
            self._active_threads.add(gt)
        else:
            self._protected_threads.add(gt)
        gt.link(self._handle_thread_exited)
        return gt

    def _run_worker(self, worker_ctx, handle_result):
        _log.debug('setting up %s', worker_ctx,
                   extra=worker_ctx.extra_for_logging)

        if not worker_ctx.parent_call_stack:
            _log.debug('starting call chain',
                       extra=worker_ctx.extra_for_logging)
        _log.debug('call stack for %s: %s',
                   worker_ctx, '->'.join(worker_ctx.call_id_stack),
                   extra=worker_ctx.extra_for_logging)

        with log_time(_log.debug, 'ran worker %s in %0.3fsec', worker_ctx):

            self.dependencies.injections.all.inject(worker_ctx)
            self.dependencies.all.worker_setup(worker_ctx)

            result = exc = None
            try:
                _log.debug('calling handler for %s', worker_ctx,
                           extra=worker_ctx.extra_for_logging)

                method = getattr(worker_ctx.service, worker_ctx.method_name)

                with log_time(_log.debug, 'ran handler for %s in %0.3fsec',
                              worker_ctx):
                    result = method(*worker_ctx.args, **worker_ctx.kwargs)
            except Exception as e:
                log_worker_exception(worker_ctx, e)
                exc = e

            with log_time(_log.debug, 'tore down worker %s in %0.3fsec',
                          worker_ctx):

                _log.debug('signalling result for %s', worker_ctx,
                           extra=worker_ctx.extra_for_logging)
                self.dependencies.injections.all.worker_result(
                    worker_ctx, result, exc)

                _log.debug('tearing down %s', worker_ctx,
                           extra=worker_ctx.extra_for_logging)
                self.dependencies.all.worker_teardown(worker_ctx)
                self.dependencies.injections.all.release(worker_ctx)

            if handle_result is not None:
                _log.debug('handling result for %s', worker_ctx,
                           extra=worker_ctx.extra_for_logging)

                with log_time(_log.debug, 'handled result for %s in %0.3fsec',
                              worker_ctx):
                    handle_result(worker_ctx, result, exc)

    def _kill_active_threads(self):
        """ Kill all managed threads that were not marked as "protected" when
        they were spawned.

        This set will include all worker threads generated by
        :meth:`ServiceContainer.spawn_worker`.

        See :meth:`ServiceContainer.spawn_managed_thread`
        """
        num_active_threads = len(self._active_threads)

        if num_active_threads:
            _log.warning('killing %s active thread(s)', num_active_threads)
            for gt in list(self._active_threads):
                gt.kill()

    def _kill_protected_threads(self):
        """ Kill any managed threads marked as protected when they were
        spawned.

        See :meth:`ServiceContainer.spawn_managed_thread`
        """
        num_protected_threads = len(self._protected_threads)

        if num_protected_threads:
            _log.warning('killing %s protected thread(s)',
                         num_protected_threads)
            for gt in list(self._protected_threads):
                gt.kill()

    def _handle_thread_exited(self, gt):
        self._active_threads.discard(gt)
        self._protected_threads.discard(gt)

        try:
            gt.wait()

        except greenlet.GreenletExit:
            # we don't care much about threads killed by the container
            # this can happen in stop() and kill() if providers
            # don't properly take care of their threads
            _log.warning('%s thread killed by container', self)

        except Exception as exc:
            _log.error('%s thread exited with error', self,
                       exc_info=True)
            # any error raised inside an active thread is unexpected behavior
            # and probably a bug in the providers or container
            # to be safe we kill the container
            self.kill(exc)

    def __str__(self):
        return '<ServiceContainer [{}] at 0x{:x}>'.format(
            self.service_name, id(self))
Example #15
class ServiceContainer(object):

    def __init__(self, service_cls, config):

        self.service_cls = service_cls
        self.config = config

        self.service_name = get_service_name(service_cls)
        self.shared_extensions = {}

        self.max_workers = (
            config.get(MAX_WORKERS_CONFIG_KEY) or DEFAULT_MAX_WORKERS)

        self.serializer, self.accept = serialization.setup(self.config)

        self.entrypoints = SpawningSet()
        self.dependencies = SpawningSet()
        self.subextensions = SpawningSet()

        for attr_name, dependency in inspect.getmembers(service_cls,
                                                        is_dependency):
            bound = dependency.bind(self.interface, attr_name)
            self.dependencies.add(bound)
            self.subextensions.update(iter_extensions(bound))

        for method_name, method in inspect.getmembers(service_cls, is_method):
            entrypoints = getattr(method, ENTRYPOINT_EXTENSIONS_ATTR, [])
            for entrypoint in entrypoints:
                bound = entrypoint.bind(self.interface, method_name)
                self.entrypoints.add(bound)
                self.subextensions.update(iter_extensions(bound))

        self.started = False
        self._worker_pool = GreenPool(size=self.max_workers)

        self._worker_threads = {}
        self._managed_threads = {}
        self._being_killed = False
        self._died = Event()

    @property
    def extensions(self):
        return SpawningSet(
            self.entrypoints | self.dependencies | self.subextensions
        )

    @property
    def interface(self):
        """ An interface to this container for use by extensions.
        """
        return self

    def start(self):
        """ Start a container by starting all of its extensions.
        """
        _log.debug('starting %s', self)
        self.started = True

        with _log_time('started %s', self):
            self.extensions.all.setup()
            self.extensions.all.start()

    def stop(self):
        """ Stop the container gracefully.

        First all entrypoints are asked to ``stop()``.
        This ensures that no new worker threads are started.

        It is the extensions' responsibility to gracefully shut down when
        ``stop()`` is called on them and only return when they have stopped.

        After all entrypoints have stopped the container waits for any
        active workers to complete.

        After all active workers have stopped the container stops all
        dependency providers.

        At this point there should be no more managed threads. In case there
        are any managed threads, they are killed by the container.
        """
        if self._died.ready():
            _log.debug('already stopped %s', self)
            return

        if self._being_killed:
            # this race condition can happen when a container is hosted by a
            # runner and yields during its kill method; if it's unlucky in
            # scheduling the runner will try to stop() it before self._died
            # has a result
            _log.debug('already being killed %s', self)
            try:
                self._died.wait()
            except:
                pass  # don't re-raise if we died with an exception
            return

        _log.debug('stopping %s', self)

        with _log_time('stopped %s', self):

            # entrypoint have to be stopped before dependencies to ensure
            # that running workers can successfully complete
            self.entrypoints.all.stop()

            # there might still be some running workers, which we have to
            # wait for to complete before we can stop dependencies
            self._worker_pool.waitall()

            # it should be safe now to stop any dependency as there is no
            # active worker which could be using it
            self.dependencies.all.stop()

            # finally, stop remaining extensions
            self.subextensions.all.stop()

            # and any managed threads they spawned
            self._kill_managed_threads()

            self.started = False

            # if `kill` is called after `stop`, they race to send this
            if not self._died.ready():
                self._died.send(None)

    def kill(self, exc_info=None):
        """ Kill the container in a semi-graceful way.

        Entrypoints are killed, followed by any active worker threads.
        Next, dependencies are killed. Finally, any remaining managed threads
        are killed.

        If ``exc_info`` is provided, the exception will be raised by
        :meth:`wait`.
        """
        if self._being_killed:
            # this happens if a managed thread exits with an exception
            # while the container is being killed or if multiple errors
            # happen simultaneously
            _log.debug('already killing %s ... waiting for death', self)
            try:
                self._died.wait()
            except:
                pass  # don't re-raise if we died with an exception
            return

        self._being_killed = True

        if self._died.ready():
            _log.debug('already stopped %s', self)
            return

        if exc_info is not None:
            _log.info('killing %s due to %s', self, exc_info[1])
        else:
            _log.info('killing %s', self)

        # protect against extensions that throw during kill; the container
        # is already dying with an exception, so ignore anything else
        def safely_kill_extensions(ext_set):
            try:
                ext_set.kill()
            except Exception as exc:
                _log.warning('Extension raised `%s` during kill', exc)

        safely_kill_extensions(self.entrypoints.all)
        self._kill_worker_threads()
        safely_kill_extensions(self.extensions.all)
        self._kill_managed_threads()

        self.started = False

        # if `kill` is called after `stop`, they race to send this
        if not self._died.ready():
            self._died.send(None, exc_info)

    def wait(self):
        """ Block until the container has been stopped.

        If the container was stopped due to an exception, ``wait()`` will
        raise it.

        Any unhandled exception raised in a managed thread or in the
        worker lifecycle (e.g. inside :meth:`DependencyProvider.worker_setup`)
        results in the container being ``kill()``ed, and the exception
        raised from ``wait()``.
        """
        return self._died.wait()

    def spawn_worker(self, entrypoint, args, kwargs,
                     context_data=None, handle_result=None):
        """ Spawn a worker thread for running the service method decorated
        by `entrypoint`.

        ``args`` and ``kwargs`` are used as parameters for the service method.

        ``context_data`` is used to initialize a ``WorkerContext``.

        ``handle_result`` is an optional function which may be passed
        in by the entrypoint. It is called with the result returned
        or error raised by the service method. If provided it must return a
        value for ``result`` and ``exc_info`` to propagate to dependencies;
        these may be different to those returned by the service method.
        """

        if self._being_killed:
            _log.info("Worker spawn prevented due to being killed")
            raise ContainerBeingKilled()

        service = self.service_cls()
        worker_ctx = WorkerContext(
            self, service, entrypoint, args, kwargs, data=context_data
        )

        _log.debug('spawning %s', worker_ctx)
        gt = self._worker_pool.spawn(
            self._run_worker, worker_ctx, handle_result
        )
        gt.link(self._handle_worker_thread_exited, worker_ctx)

        self._worker_threads[worker_ctx] = gt
        return worker_ctx

    def spawn_managed_thread(self, fn, identifier=None):
        """ Spawn a managed thread to run ``fn`` on behalf of an extension.
        The passed `identifier` will be included in logs related to this
        thread, and otherwise defaults to `fn.__name__`, if it is set.

        Any uncaught errors inside ``fn`` cause the container to be killed.

        It is the caller's responsibility to terminate their spawned threads.
        Threads are killed automatically if they are still running after
        all extensions are stopped during :meth:`ServiceContainer.stop`.

        Extensions should delegate all thread spawning to the container.
        """
        if identifier is None:
            identifier = getattr(fn, '__name__', "<unknown>")

        gt = eventlet.spawn(fn)
        self._managed_threads[gt] = identifier
        gt.link(self._handle_managed_thread_exited, identifier)
        return gt

    def _run_worker(self, worker_ctx, handle_result):
        _log.debug('setting up %s', worker_ctx)

        _log.debug('call stack for %s: %s',
                   worker_ctx, '->'.join(worker_ctx.call_id_stack))

        with _log_time('ran worker %s', worker_ctx):

            self._inject_dependencies(worker_ctx)
            self._worker_setup(worker_ctx)

            result = exc_info = None
            method_name = worker_ctx.entrypoint.method_name
            method = getattr(worker_ctx.service, method_name)
            try:

                _log.debug('calling handler for %s', worker_ctx)

                with _log_time('ran handler for %s', worker_ctx):
                    result = method(*worker_ctx.args, **worker_ctx.kwargs)
            except Exception as exc:
                if isinstance(exc, worker_ctx.entrypoint.expected_exceptions):
                    _log.warning(
                        '(expected) error handling worker %s: %s',
                        worker_ctx, exc, exc_info=True)
                else:
                    _log.exception(
                        'error handling worker %s: %s', worker_ctx, exc)
                exc_info = sys.exc_info()

            if handle_result is not None:
                _log.debug('handling result for %s', worker_ctx)

                with _log_time('handled result for %s', worker_ctx):
                    result, exc_info = handle_result(
                        worker_ctx, result, exc_info)

            with _log_time('tore down worker %s', worker_ctx):

                self._worker_result(worker_ctx, result, exc_info)

                # we don't need this any more, and breaking the cycle means
                # this can be reclaimed immediately, rather than waiting for a
                # gc sweep
                del exc_info

                self._worker_teardown(worker_ctx)

    def _inject_dependencies(self, worker_ctx):
        for provider in self.dependencies:
            dependency = provider.get_dependency(worker_ctx)
            setattr(worker_ctx.service, provider.attr_name, dependency)

    def _worker_setup(self, worker_ctx):
        for provider in self.dependencies:
            provider.worker_setup(worker_ctx)

    def _worker_result(self, worker_ctx, result, exc_info):
        _log.debug('signalling result for %s', worker_ctx)
        for provider in self.dependencies:
            provider.worker_result(worker_ctx, result, exc_info)

    def _worker_teardown(self, worker_ctx):
        for provider in self.dependencies:
            provider.worker_teardown(worker_ctx)

    def _kill_worker_threads(self):
        """ Kill any currently executing worker threads.

        See :meth:`ServiceContainer.spawn_worker`
        """
        num_workers = len(self._worker_threads)

        if num_workers:
            _log.warning('killing %s active worker(s)', num_workers)
            for worker_ctx, gt in list(self._worker_threads.items()):
                _log.warning('killing active worker for %s', worker_ctx)
                gt.kill()

    def _kill_managed_threads(self):
        """ Kill any currently executing managed threads.

        See :meth:`ServiceContainer.spawn_managed_thread`
        """
        num_threads = len(self._managed_threads)

        if num_threads:
            _log.warning('killing %s managed thread(s)', num_threads)
            for gt, identifier in list(self._managed_threads.items()):
                _log.warning('killing managed thread `%s`', identifier)
                gt.kill()

    def _handle_worker_thread_exited(self, gt, worker_ctx):
        self._worker_threads.pop(worker_ctx, None)
        self._handle_thread_exited(gt)

    def _handle_managed_thread_exited(self, gt, extension):
        self._managed_threads.pop(gt, None)
        self._handle_thread_exited(gt)

    def _handle_thread_exited(self, gt):
        try:
            gt.wait()

        except GreenletExit:
            # we don't care much about threads killed by the container
            # this can happen in stop() and kill() if extensions
            # don't properly take care of their threads
            _log.debug('%s thread killed by container', self)

        except Exception:
            _log.critical('%s thread exited with error', self, exc_info=True)
            # any uncaught error in a thread is unexpected behavior
            # and probably a bug in the extension or container.
            # to be safe we call self.kill() to kill our dependencies and
            # provide the exception info to be raised in self.wait().
            self.kill(sys.exc_info())

    def __repr__(self):
        service_name = self.service_name
        return '<ServiceContainer [{}] at 0x{:x}>'.format(
            service_name, id(self))
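
# A standalone sketch of the pattern ``spawn_managed_thread`` above relies on:
# ``eventlet.spawn`` plus ``link`` gives the container a callback once the
# thread exits, whether it returned, raised, or was killed (see
# ``_handle_managed_thread_exited``). The task and identifier below are
# illustrative assumptions, not part of the original code.
import eventlet
from greenlet import GreenletExit


def flaky_task():
    eventlet.sleep(0.1)
    raise RuntimeError('boom')


def handle_exit(gt, identifier):
    try:
        gt.wait()
    except GreenletExit:
        print('%s was killed by the container' % identifier)
    except Exception as exc:
        print('%s exited with error: %s' % (identifier, exc))


gt = eventlet.spawn(flaky_task)
gt.link(handle_exit, 'example.flaky_task')
eventlet.sleep(0.5)  # give the thread time to run and the link to fire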
Example #16
class Service(ConsumerMixin):
    def __init__(self, controllercls,
            connection, exchange, topic,
            pool=None, poolsize=1000):
        self.nodeid = UIDGEN()

        if pool is None:
            self.procpool = GreenPool(size=poolsize)
        else:
            self.procpool = pool

        self.connection = connection
        self.controller = controllercls()
        self.topic = topic
        self.greenlet = None
        self.messagesem = Semaphore()
        self.consume_ready = Event()

        node_topic = "{}.{}".format(self.topic, self.nodeid)
        self.queues = [entities.get_topic_queue(exchange, topic),
                       entities.get_topic_queue(exchange, node_topic),
                       entities.get_fanout_queue(topic), ]
        self._channel = None
        self._consumers = None

    def start(self):
        # self.connection = newrpc.create_connection()
        if self.greenlet is not None and not self.greenlet.dead:
            raise RuntimeError()
        self.greenlet = eventlet.spawn(self.run)

    def get_consumers(self, Consumer, channel):
        return [Consumer(self.queues, callbacks=[self.on_message, ]), ]

    def on_consume_ready(self, connection, channel, consumers, **kwargs):
        self._consumers = consumers
        self._channel = channel
        self.consume_ready.send(None)

    def on_consume_end(self, connection, channel):
        self.consume_ready.reset()

    def on_message(self, body, message):
        # need a semaphore to stop killing between message ack()
        # and spawning process.
        with self.messagesem:
            self.procpool.spawn(self.handle_request, body)
            message.ack()

    def handle_request(self, body):
        newrpc.process_message(self.connection, self.controller, body)

    def wait(self):
        try:
            self.greenlet.wait()
        except greenlet.GreenletExit:
            pass
        return self.procpool.waitall()

    def kill(self):
        if self.greenlet is not None and not self.greenlet.dead:
            self.should_stop = True
            #with self.messagesem:
                #self.greenlet.kill()
            self.greenlet.wait()
        if self._consumers:
            for c in self._consumers:
                c.cancel()
        if self._channel is not None:
            self._channel.close()

    def link(self, *args, **kwargs):
        return self.greenlet.link(*args, **kwargs)

    def kill_processes(self):
        for g in self.procpool.coroutines_running:
            g.kill()
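
# A standalone sketch of the pattern used in ``on_message`` above: a Semaphore
# guards the window between spawning the worker and ack()ing the message, so a
# concurrent kill cannot land between the two. ``FakeMessage`` is an
# illustrative stand-in for a kombu message, not part of the original code.
import eventlet
from eventlet import GreenPool
from eventlet.semaphore import Semaphore


class FakeMessage(object):
    def ack(self):
        print('acked')


def handle_request(body):
    eventlet.sleep(0.1)
    print('handled %r' % body)


pool = GreenPool(10)
messagesem = Semaphore()


def on_message(body, message):
    with messagesem:
        pool.spawn(handle_request, body)
        message.ack()


on_message({'payload': 1}, FakeMessage())
pool.waitall()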
Example #17
class ServiceContainer(object):

    def __init__(self, service_cls, worker_ctx_cls, config):

        self.service_cls = service_cls
        self.worker_ctx_cls = worker_ctx_cls

        self.service_name = get_service_name(service_cls)

        self.config = config
        self.max_workers = (
            config.get(MAX_WORKERS_CONFIG_KEY) or DEFAULT_MAX_WORKERS)

        self.dependencies = DependencySet()
        for dep in prepare_dependencies(self):
            self.dependencies.add(dep)

        self.started = False
        self._worker_pool = GreenPool(size=self.max_workers)

        self._active_threads = {}
        self._protected_threads = set()
        self._being_killed = False
        self._died = Event()

    @property
    def entrypoints(self):
        return filter(is_entrypoint_provider, self.dependencies)

    @property
    def injections(self):
        return filter(is_injection_provider, self.dependencies)

    def start(self):
        """ Start a container by starting all the dependency providers.
        """
        _log.debug('starting %s', self)
        self.started = True

        with _log_time('started %s', self):
            self.dependencies.all.prepare()
            self.dependencies.all.start()

    def stop(self):
        """ Stop the container gracefully.

        First all entrypoints are asked to ``stop()``.
        This ensures that no new worker threads are started.

        It is the providers' responsibility to gracefully shut down when
        ``stop()`` is called on them and only return when they have stopped.

        After all entrypoints have stopped the container waits for any
        active workers to complete.

        After all active workers have stopped the container stops all
        injections.

        At this point there should be no more managed threads. In case there
        are any managed threads, they are killed by the container.
        """
        if self._died.ready():
            _log.debug('already stopped %s', self)
            return

        if self._being_killed:
            # this race condition can happen when a container is hosted by a
            # runner and yields during its kill method; if it's unlucky in
            # scheduling the runner will try to stop() it before self._died
            # has a result
            _log.debug('already being killed %s', self)
            try:
                self._died.wait()
            except:
                pass  # don't re-raise if we died with an exception
            return

        _log.debug('stopping %s', self)

        with _log_time('stopped %s', self):
            dependencies = self.dependencies

            # entrypoint deps have to be stopped before injection deps
            # to ensure that running workers can successfully complete
            dependencies.entrypoints.all.stop()

            # there might still be some running workers, which we have to
            # wait for to complete before we can stop injection dependencies
            self._worker_pool.waitall()

            # it should be safe now to stop any injection as there is no
            # active worker which could be using it
            dependencies.injections.all.stop()

            # finally, stop nested dependencies
            dependencies.nested.all.stop()

            # just in case there was a provider not taking care of its workers,
            # or a dependency not taking care of its protected threads
            self._kill_active_threads()
            self._kill_protected_threads()

            self.started = False
            self._died.send(None)

    def kill(self, exc_info=None):
        """ Kill the container in a semi-graceful way.

        All non-protected managed threads are killed first. This includes
        all active workers generated by :meth:`ServiceContainer.spawn_worker`.
        Next, dependencies are killed. Finally, any remaining protected threads
        are killed.

        If ``exc_info`` is provided, the exception will be raised by
        :meth:`wait`.
        """
        if self._being_killed:
            # this happens if a managed thread exits with an exception
            # while the container is being killed or if multiple errors
            # happen simultaneously
            _log.debug('already killing %s ... waiting for death', self)
            try:
                self._died.wait()
            except:
                pass  # don't re-raise if we died with an exception
            return

        self._being_killed = True

        if self._died.ready():
            _log.debug('already stopped %s', self)
            return

        if exc_info is not None:
            _log.info('killing %s due to %s', self, exc_info[1])
        else:
            _log.info('killing %s', self)

        # protect against dependencies that throw during kill; the container
        # is already dying with an exception, so ignore anything else
        def safely_kill_dependencies(dep_set):
            try:
                dep_set.kill()
            except Exception as exc:
                _log.warning('Dependency raised `%s` during kill', exc)

        safely_kill_dependencies(self.dependencies.entrypoints.all)
        self._kill_active_threads()
        safely_kill_dependencies(self.dependencies.all)
        self._kill_protected_threads()

        self.started = False
        self._died.send(None, exc_info)

    def wait(self):
        """ Block until the container has been stopped.

        If the container was stopped due to an exception, ``wait()`` will
        raise it.

        Any unhandled exception raised in a managed thread or in the
        life-cycle management code also causes the container to be
        ``kill()``ed, which causes an exception to be raised from ``wait()``.
        """
        return self._died.wait()

    def spawn_worker(self, provider, args, kwargs,
                     context_data=None, handle_result=None):
        """ Spawn a worker thread for running the service method decorated
        with an entrypoint ``provider``.

        ``args`` and ``kwargs`` are used as arguments for the service
        method.

        ``context_data`` is used to initialize a ``WorkerContext``.

        ``handle_result`` is an optional function which may be passed
        in by the entrypoint provider. It is called with the result returned
        or error raised by the service method. If provided it must return a
        value for ``result`` and ``exc_info`` to propagate to dependencies;
        these may be different to those returned by the service method.
        """

        if self._being_killed:
            _log.info("Worker spawn prevented due to being killed")
            raise ContainerBeingKilled()

        service = self.service_cls()
        worker_ctx = self.worker_ctx_cls(
            self, service, provider, args, kwargs, data=context_data)

        _log.debug('spawning %s', worker_ctx)
        gt = self._worker_pool.spawn(self._run_worker, worker_ctx,
                                     handle_result)
        self._active_threads[gt] = provider
        gt.link(self._handle_thread_exited)
        return worker_ctx

    def spawn_managed_thread(self, run_method, protected=False):
        """ Spawn a managed thread to run ``run_method``.

        Threads can be marked as ``protected``, which means the container will
        not forcibly kill them until after all dependencies have been killed.
        Dependencies that require a managed thread to complete their kill
        procedure should ensure to mark them as ``protected``.

        Any uncaught errors inside ``run_method`` cause the container to be
        killed.

        It is the caller's responsibility to terminate their spawned threads.
        Threads are killed automatically if they are still running after
        all dependencies are stopped during :meth:`ServiceContainer.stop`.

        Entrypoints may only create separate threads using this method,
        to ensure they are life-cycle managed.
        """
        gt = eventlet.spawn(run_method)
        if not protected:
            self._active_threads[gt] = MANAGED_THREAD
        else:
            self._protected_threads.add(gt)
        gt.link(self._handle_thread_exited)
        return gt

    def _run_worker(self, worker_ctx, handle_result):
        _log.debug('setting up %s', worker_ctx)

        if not worker_ctx.parent_call_stack:
            _log.debug('starting call chain')
        _log.debug('call stack for %s: %s',
                   worker_ctx, '->'.join(worker_ctx.call_id_stack))

        with _log_time('ran worker %s', worker_ctx):

            self.dependencies.injections.all.inject(worker_ctx)
            self.dependencies.all.worker_setup(worker_ctx)

            result = exc_info = None
            method = getattr(worker_ctx.service, worker_ctx.provider.name)
            try:

                _log.debug('calling handler for %s', worker_ctx)

                with _log_time('ran handler for %s', worker_ctx):
                    result = method(*worker_ctx.args, **worker_ctx.kwargs)
            except Exception as exc:
                _log.debug('error handling worker %s: %s', worker_ctx, exc,
                           exc_info=True)
                exc_info = sys.exc_info()

            if handle_result is not None:
                _log.debug('handling result for %s', worker_ctx)

                with _log_time('handled result for %s', worker_ctx):
                    result, exc_info = handle_result(
                        worker_ctx, result, exc_info)

            with _log_time('tore down worker %s', worker_ctx):

                _log.debug('signalling result for %s', worker_ctx)
                self.dependencies.injections.all.worker_result(
                    worker_ctx, result, exc_info)

                # we don't need this any more, and breaking the cycle means
                # this can be reclaimed immediately, rather than waiting for a
                # gc sweep
                del exc_info

                self.dependencies.all.worker_teardown(worker_ctx)
                self.dependencies.injections.all.release(worker_ctx)

    def _kill_active_threads(self):
        """ Kill all managed threads that were not marked as "protected" when
        they were spawned.

        This set will include all worker threads generated by
        :meth:`ServiceContainer.spawn_worker`.

        See :meth:`ServiceContainer.spawn_managed_thread`
        """
        num_active_threads = len(self._active_threads)

        if num_active_threads:
            _log.warning('killing %s active thread(s)', num_active_threads)
            for gt, provider in list(self._active_threads.items()):
                if provider is not MANAGED_THREAD:
                    description = '{}.{}'.format(
                        self.service_name, provider.name)
                    _log.warning('killing active thread for %s', description)
                gt.kill()

    def _kill_protected_threads(self):
        """ Kill any managed threads marked as protected when they were
        spawned.

        See :meth:`ServiceContainer.spawn_managed_thread`
        """
        num_protected_threads = len(self._protected_threads)

        if num_protected_threads:
            _log.warning('killing %s protected thread(s)',
                         num_protected_threads)
            for gt in list(self._protected_threads):
                gt.kill()

    def _handle_thread_exited(self, gt):
        self._active_threads.pop(gt, None)
        self._protected_threads.discard(gt)

        try:
            gt.wait()

        except GreenletExit:
            # we don't care much about threads killed by the container
            # this can happen in stop() and kill() if providers
            # don't properly take care of their threads
            _log.warning('%s thread killed by container', self)

        except Exception:
            _log.error('%s thread exited with error', self, exc_info=True)
            # any error raised inside an active thread is unexpected behavior
            # and probably a bug in the providers or container.
            # to be safe we call self.kill() to kill our dependencies and
            # provide the exception info to be raised in self.wait().
            self.kill(sys.exc_info())

    def __str__(self):
        return '<ServiceContainer [{}] at 0x{:x}>'.format(
            self.service_name, id(self))
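
# A hedged sketch of the ``handle_result`` contract described in
# ``spawn_worker`` above: it receives the worker context plus the result or
# exc_info produced by the service method, and must return the (possibly
# modified) pair to propagate to the dependencies. Swallowing ``ValueError``
# below is purely illustrative, not behaviour of the original code.
def example_handle_result(worker_ctx, result, exc_info):
    if exc_info is not None and isinstance(exc_info[1], ValueError):
        # treat this particular error as a normal, empty result
        return None, None
    return result, exc_info

# an entrypoint provider would then pass it along when spawning a worker:
# container.spawn_worker(provider, args, kwargs,
#                        context_data=context_data,
#                        handle_result=example_handle_result)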
Example #18
class Service(ConsumerMixin):
    def __init__(
            self, controllercls, connection_factory, exchange, topic,
            pool=None, poolsize=1000):
        self.nodeid = UIDGEN()

        self.max_workers = poolsize
        if pool is None:
            self.procpool = GreenPool(size=poolsize)
        else:
            self.procpool = pool

        self.controller = controllercls()
        self.service = self.controller
        self.topic = topic
        self.greenlet = None
        self.consume_ready = Event()

        node_topic = "{}.{}".format(self.topic, self.nodeid)
        self.nova_queues = [
            entities.get_topic_queue(exchange, topic),
            entities.get_topic_queue(exchange, node_topic),
            entities.get_fanout_queue(topic), ]

        self._channel = None
        self._consumers = None

        self.connection = connection_factory()
        self.connection_factory = connection_factory

        inject_dependencies(self.controller, self)

        self._connection_pool = Pool(
            max_size=self.procpool.size,
            create=connection_factory
        )

        self.workers = set()
        self._pending_ack_messages = []
        self._pending_requeue_messages = []
        self._do_cancel_consumers = False
        self._consumers_cancelled = Event()

        self._timers = list(get_timers(self.controller))

    def start(self):
        self.start_timers()
        # greenlet has a magic attribute ``dead`` - pylint: disable=E1101
        if self.greenlet is not None and not self.greenlet.dead:
            raise RuntimeError()
        self.greenlet = eventlet.spawn(self.run)

    def start_timers(self):
        for timer in self._timers:
            timer.start()

    def get_consumers(self, Consumer, channel):
        nova_consumer = Consumer(
            self.nova_queues, callbacks=[self.on_nova_message, ])

        consume_consumers = get_consumers(
            Consumer, self, self.on_consume_message)

        consumers = [nova_consumer] + list(consume_consumers)

        prefetch_count = self.procpool.size
        for consumer in consumers:
            consumer.qos(prefetch_count=prefetch_count)

        return consumers

    def on_consume_ready(self, connection, channel, consumers, **kwargs):
        self._consumers = consumers
        self._channel = channel
        self.consume_ready.send(None)

    def on_consume_end(self, connection, channel):
        self.consume_ready.reset()

    def on_nova_message(self, body, message):
        _log.debug('spawning RPC worker (%d free)', self.procpool.free())

        gt = self.procpool.spawn(self.handle_rpc_message, body)

        gt.link(self.handle_rpc_message_processed, message)
        self.workers.add(gt)

    def on_consume_message(self, consumer_method_config, body, message):
        _log.debug('spawning consume worker (%d free)', self.procpool.free())

        gt = self.procpool.spawn(
            self.handle_consume_message, consumer_method_config, body, message)

        gt.link(self.handle_consume_message_processed)
        self.workers.add(gt)

    def handle_rpc_message(self, body):
        # ``item`` is patched onto the pool for use with ``with``; pylint
        # can't find it
        # pylint: disable=E1102
        with self._connection_pool.item() as connection:
            process_rpc_message(connection, self.controller, body)

    def handle_rpc_message_processed(self, gt, message):
        self.workers.discard(gt)
        self._pending_ack_messages.append(message)

    def handle_consume_message(self, consumer_method_config, body, message):
        with log_time(_log.debug, 'processed consume message in %0.3fsec'):
            consumer_method, consumer_config = consumer_method_config

            try:
                consumer_method(body)
            except Exception as e:
                if consumer_config.requeue_on_error:
                    _log.exception(
                        'failed to consume message, requeueing message: '
                        '%s(): %s', consumer_method, e)
                    self._pending_requeue_messages.append(message)
                else:
                    _log.exception(
                        'failed to consume message, ignoring message: '
                        '%s(): %s', consumer_method, e)
                    self._pending_ack_messages.append(message)
            else:
                self._pending_ack_messages.append(message)

    def handle_consume_message_processed(self, gt):
        self.workers.discard(gt)

    def on_iteration(self):
        self.process_consumer_cancellation()
        # we need to make sure we process any pending messages before shutdown
        self.process_pending_message_acks()
        self.process_shutdown()

    def process_consumer_cancellation(self):
        if self._do_cancel_consumers:
            self._do_cancel_consumers = False
            if self._consumers:
                _log.debug('cancelling consumers')
                for consumer in self._consumers:
                    consumer.cancel()
            self._consumers_cancelled.send(True)

    def process_pending_message_acks(self):
        messages = self._pending_ack_messages
        if messages:
            _log.debug('ack() %d processed messages', len(messages))
            while messages:
                msg = messages.pop()
                msg.ack()
                eventlet.sleep()

        messages = self._pending_requeue_messages
        if messages:
            _log.debug('requeue() %d processed messages', len(messages))
            while messages:
                msg = messages.pop()
                msg.requeue()
                eventlet.sleep()

    def consume(self, limit=None, timeout=None, safety_interval=0.1, **kwargs):
        """ Lifted from kombu so we are able to break the loop immediately
            after a shutdown is triggered rather than waiting for the timeout.
        """
        elapsed = 0
        with self.Consumer() as (connection, channel, consumers):
            with self.extra_context(connection, channel):
                self.on_consume_ready(connection, channel, consumers, **kwargs)
                for i in limit and xrange(limit) or count():
                    # moved from after the following `should_stop` condition to
                    # avoid waiting on a drain_events timeout before breaking
                    # the loop.
                    self.on_iteration()
                    if self.should_stop:
                        break

                    try:
                        connection.drain_events(timeout=safety_interval)
                    except socket.timeout:
                        elapsed += safety_interval
                        # Excluding the following clause from coverage,
                        # as timeout never appears to be set - This method
                        # is a lift from kombu so will leave in place for now.
                        if timeout and elapsed >= timeout:  # pragma: no cover
                            raise socket.timeout()
                    except socket.error:
                        if not self.should_stop:
                            raise
                    else:
                        yield
                        elapsed = 0

    def process_shutdown(self):
        consumers_cancelled = self._consumers_cancelled.ready()

        no_active_timers = (len(self._timers) == 0)

        no_active_workers = (self.procpool.running() < 1)

        no_pending_message_acks = not (
            self._pending_ack_messages or
            self._pending_requeue_messages
        )

        ready_to_stop = (
            consumers_cancelled and
            no_active_timers and
            no_active_workers and
            no_pending_message_acks
        )

        if ready_to_stop:
            _log.debug('notifying service to stop')
            self.should_stop = True

    def cancel_consumers(self):
        # greenlet has a magic attribute ``dead`` - pylint: disable=E1101
        if self.greenlet is not None and not self.greenlet.dead:
            # since consumers were started in a separate thread,
            # we will just notify the thread to avoid getting
            # "Second simultaneous read" errors
            _log.debug('notifying consumers to be cancelled')
            self._do_cancel_consumers = True
            self._consumers_cancelled.wait()
        else:
            _log.debug('consumer thread already dead')

    def cancel_timers(self):
        if self._timers:
            _log.debug('stopping %d timers', len(self._timers))
            while self._timers:
                self._timers.pop().stop()

    def kill_workers(self):
        _log.debug('force killing %d workers', len(self.workers))
        while self.workers:
            self.workers.pop().kill()

    def wait_for_workers(self):
        pool = self.procpool
        _log.debug('waiting for %d workers to complete', pool.running())
        pool.waitall()

    def shut_down(self):
        # greenlet has a magic attribute ``dead`` - pylint: disable=E1101
        if self.greenlet is not None and not self.greenlet.dead:
            _log.debug('stopping service')
            self.greenlet.wait()

        # TODO: when is this ever not None?
        if self._channel is not None:
            _log.debug('closing channel')
            self._channel.close()

    def kill(self, force=False):
        _log.debug('killing service')

        self.cancel_consumers()

        self.cancel_timers()

        if force:
            self.kill_workers()
        else:
            self.wait_for_workers()

        self.shut_down()

    def link(self, *args, **kwargs):
        return self.greenlet.link(*args, **kwargs)
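
# A standalone sketch of the deferred-ack pattern above: workers never touch
# the channel themselves; they only append finished messages to a pending
# list, and the consumer loop drains that list from its own thread in
# ``process_pending_message_acks``. ``FakeMessage`` is an illustrative
# stand-in for a kombu message, not part of the original code.
import eventlet
from eventlet import GreenPool


class FakeMessage(object):
    def __init__(self, body):
        self.body = body

    def ack(self):
        print('acked %r' % self.body)


pool = GreenPool(10)
pending_ack_messages = []


def handle(message):
    eventlet.sleep(0.05)                  # simulate work
    pending_ack_messages.append(message)  # defer the ack to the consumer loop


for i in range(3):
    pool.spawn(handle, FakeMessage(i))

pool.waitall()
while pending_ack_messages:               # what on_iteration() does above
    pending_ack_messages.pop().ack()
    eventlet.sleep()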
Example #19
def entrance():
    pool = GreenPool(100)
    for x in range(10):
        pool.spawn(fetcher)
    pool.waitall()
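
# ``fetcher`` above comes from the surrounding module and is not shown here.
# As a minimal, self-contained illustration of the same GreenPool idiom: the
# pool size caps concurrency, and ``spawn`` blocks once that many
# greenthreads are already running. ``fetch`` and the fake URLs are
# illustrative assumptions.
import eventlet
from eventlet import GreenPool


def fetch(url):
    eventlet.sleep(0.1)         # stands in for a network call
    return url


pool = GreenPool(2)             # at most two fetches in flight at once
for url in ['u1', 'u2', 'u3', 'u4']:
    pool.spawn(fetch, url)      # blocks while the pool is full
pool.waitall()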
Example #20
class AsynchronousSection(object):
    """Allows calling function asynchronously with waiting on exit."""

    MIN_POOL_SIZE = 1

    def __init__(self, size=0, ignore_errors_num=0):
        """Initialises.

        :param size: the maximum number of parallel tasks
        :param ignore_errors_num:
               number of errors that may occur without stopping execution
        """

        self.executor = GreenPool(max(size, self.MIN_POOL_SIZE))
        self.ignore_errors_num = ignore_errors_num
        self.errors = []
        self.tasks = set()

    def __enter__(self):
        self.errors[:] = []
        return self

    def __exit__(self, etype, *_):
        self.wait(etype is not None)

    def execute(self, func, *args, **kwargs):
        """Calls function asynchronously."""
        if 0 <= self.ignore_errors_num < len(self.errors):
            raise RuntimeError("Too many errors.")

        gt = self.executor.spawn(func, *args, **kwargs)
        self.tasks.add(gt)
        gt.link(self.on_complete)
        return gt

    def on_complete(self, gt):
        """Callback to handle task completion."""

        try:
            gt.wait()
        except Exception as e:
            logger.error("Task failed: %s", six.text_type(e))
            self.errors.append(sys.exc_info())
        finally:
            self.tasks.discard(gt)

    def wait(self, ignore_errors=False):
        """Waits until all tasks will be completed.

        Do not use directly, will be called from context manager.
        """
        self.executor.waitall()
        if len(self.errors) > 0:
            for exc_info in self.errors:
                logger.exception("error details.", exc_info=exc_info)

            self.errors[:] = []
            if not ignore_errors:
                raise RuntimeError(
                    "Operations completed with errors.\n"
                    "See log for more details."
                )
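
# A usage sketch for AsynchronousSection above, assuming the class is in scope
# and that no task fails (the error path also needs the module's ``logger``
# and ``six``, which are imported elsewhere in the original file). ``ping``
# and its arguments are illustrative assumptions.
import eventlet


def ping(host):
    eventlet.sleep(0.05)   # stands in for real I/O
    return host


with AsynchronousSection(size=4, ignore_errors_num=0) as section:
    for host in ('10.0.0.1', '10.0.0.2', '10.0.0.3'):
        section.execute(ping, host)
# leaving the block calls wait(), which blocks on executor.waitall() and
# raises RuntimeError if any task recorded an error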
Example #21
            'type'].firstChild.nodeValue
        service_params['host'] = service.getElementsByTagName(
            'host')[0].firstChild.nodeValue
        service_params['exchange'] = service.getElementsByTagName('params')[
            0].getElementsByTagName('exchange')[0].firstChild.nodeValue
        service_params['routing_key'] = service.getElementsByTagName(
            'params')[0].getElementsByTagName('queue')[0].firstChild.nodeValue
        service_params['on_request_name'] = service.getElementsByTagName(
            'params')[0].getElementsByTagName(
                'callback')[0].firstChild.nodeValue

        services_dict[service_name] = service_params

    return services_dict


#For Test
if __name__ == '__main__':

    import threading
    pool = GreenPool()

    services = get_services()
    service = sys.argv[1]

    if service in services.keys():
        # spawn() expects a callable plus its arguments; calling start() here
        # would run it synchronously and hand spawn() its return value instead
        pool.spawn(Worker().start, **services.get(service, {}))
        pool.waitall()

    else:
        print 'no such service'
Example #22
    def __init__(self, configuration, overrides=None):
        """
        Here we register the configuration options and extract all the
        configuration data.

        We also establish connections to the fabrics for use in subsequent
        operations.

        This will raise a ZoneManagerParallel exception if anything goes wrong
        during the connection process.
        """
        self.conf = configuration
        self.conf.register_opts(zone_manager_opts)

        fabric_names = self.conf.fc_fabric_names.split(',')

        # Check for uniqueness of names
        if len(fabric_names) != len(set(fabric_names)):
            msg = _("Fabric names are not unique: %(fabric_names)s") % \
                {'fabric_names': ','.join(fabric_names)}
            raise exception.ZoneManagerMisconfig(reason=msg)

        # First we add all the configuration options that we expect, given
        # the list of fabric names
        fc_fabric_opts = []

        for fabric_name in fabric_names:
            fc_fabric_opts.append(cfg.StrOpt('fc_fabric_address_'
                                             + fabric_name,
                                             default='',
                                             help='Management IP'
                                             ' of fabric'))
            fc_fabric_opts.append(cfg.StrOpt('fc_fabric_user_'
                                             + fabric_name,
                                             default='',
                                             help='Fabric user ID'))
            fc_fabric_opts.append(cfg.StrOpt('fc_fabric_password_'
                                             + fabric_name,
                                             default='',
                                             secret=True,
                                             help='Password for user'))
            fc_fabric_opts.append(cfg.IntOpt('fc_fabric_port_'
                                             + fabric_name,
                                             default=22,
                                             help='Connecting port'))
            fc_fabric_opts.append(cfg.FloatOpt('fc_fabric_timeout_'
                                               + fabric_name,
                                               default=10.0,
                                               help='Connection timeout '
                                                    '(seconds)'))
            fc_fabric_opts.append(cfg.FloatOpt('fc_fabric_cmd_timeout_'
                                               + fabric_name,
                                               default=100.0,
                                               help='Command execution '
                                                    'timeout (seconds)'))
            fc_fabric_opts.append(cfg.StrOpt('fc_fabric_display_name_'
                                             + fabric_name,
                                             default="",
                                             help='Display name'))
            fc_fabric_opts.append(cfg.StrOpt('zoning_policy_'
                                             + fabric_name,
                                             default=self.conf.zoning_policy,
                                             help='overridden '
                                             'zoning policy'))
            fc_fabric_opts.append(cfg.BoolOpt('zone_activate_'
                                              + fabric_name,
                                              default=self.conf.zone_activate,
                                              help='overridden zoning '
                                              'activation state'))
            fc_fabric_opts.append(cfg.StrOpt('zone_name_prefix_'
                                             + fabric_name,
                                             default=
                                             self.conf.zone_name_prefix,
                                             help='overridden zone '
                                             'name prefix'))
            fc_fabric_opts.append(cfg.StrOpt('principal_switch_wwn_'
                                             + fabric_name,
                                             default=fabric_name,
                                             help='Principal switch '
                                             'WWN of the fabric'))
            fc_fabric_opts.append(cfg.IntOpt('fc_fabric_num_attempts_'
                                             + fabric_name,
                                             default=3,
                                             help='Number of attempts of '
                                             'an operation'))
            fc_fabric_opts.append(cfg.IntOpt('fc_fabric_min_retry_gap_'
                                             + fabric_name,
                                             default=10,
                                             help='Minimum time to wait '
                                             'before retrying a failed '
                                             'operation'))
            fc_fabric_opts.append(cfg.IntOpt('fc_fabric_max_retry_gap_'
                                             + fabric_name,
                                             default=20,
                                             help='Maximum time to wait '
                                             'before retrying a failed '
                                             'operation'))

        self.conf.append_config_values(fc_fabric_opts)

        # If running in a process without fabric CLI options passed, allow
        # those options to be given and set via the overrides param, which
        # is a list of (key, value) tuples.
        if overrides:
            for item in overrides:
                self.conf.local_conf.set_override(item[0], item[1])

        # Now we initialise a connection to the switch for each of the fabrics

        # This function is called in a GreenThread for each registered switch.
        def _do_connect(context, fabric_name):
            # Mark this thread as running the passed-in context, so that log
            # messages can be correlated.
            context.update_store()

            @lockutils.synchronized(fabric_name, 'fcfabric-', True)
            def _do_locked_connect(fabric_name):
                fabric_ip = self.conf.safe_get('fc_fabric_address_' +
                                               fabric_name)
                fabric_user = self.conf.safe_get('fc_fabric_user_' +
                                                 fabric_name)
                fabric_pwd = self.conf.safe_get('fc_fabric_password_' +
                                                fabric_name)
                fabric_port = self.conf.safe_get('fc_fabric_port_' +
                                                 fabric_name)
                fabric_timeout = self.conf.safe_get('fc_fabric_timeout_' +
                                                    fabric_name)
                fabric_cmd_timeout = \
                    self.conf.safe_get('fc_fabric_cmd_timeout_' +
                                       fabric_name)

                fabric_display_name = \
                    self.conf.safe_get('fc_fabric_display_name_' +
                                       fabric_name)

                fabric_num_retries = \
                    self.conf.safe_get('fc_fabric_num_attempts_' +
                                       fabric_name)

                fabric_min_retry_gap = \
                    self.conf.safe_get('fc_fabric_min_retry_gap_' +
                                       fabric_name)

                fabric_max_retry_gap = \
                    self.conf.safe_get('fc_fabric_max_retry_gap_' +
                                       fabric_name)

                descriptor = exception.FabricDescriptor(fabric_name,
                                                        fabric_display_name,
                                                        fabric_user,
                                                        fabric_ip,
                                                        fabric_port)

                conn = BrcdFCZoneClientCLI(fabric_ip, fabric_user,
                                           fabric_pwd, fabric_port,
                                           fabric_timeout,
                                           fabric_cmd_timeout,
                                           descriptor,
                                           fabric_num_retries,
                                           fabric_min_retry_gap,
                                           fabric_max_retry_gap)

                return conn

            return _do_locked_connect(fabric_name)

        # Start a GreenThread for each fabric that we will connect to and
        # initiate the connection in it.
        pool = GreenPool(size=len(fabric_names))

        # Obtain our current context so that we can make sure that our child
        # threads have the same context, so that we can correlate log messages
        # that they generate.
        context = getattr(local.store, 'context', None)

        threads = {}
        for fabric_name in fabric_names:
            thread = pool.spawn(_do_connect, context, fabric_name)
            threads[fabric_name] = thread

        # Collect the resulting connection objects.
        # The wait() will raise an exception if something went wrong.
        exceptions = []
        for fabric_name, thread in threads.iteritems():
            try:
                self.fabrics[fabric_name] = thread.wait()
                LOG.info(_("Connection established to fabric %(f_name)s") %
                         dict(f_name=fabric_name))
            except Exception as e:
                exceptions.append(e)

        # If any exceptions were raised, we throw an exception that
        # encapsulates them all.
        if exceptions:
            raise exception.ZoneManagerParallel(exceptions)
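
# A standalone sketch of the fan-out/collect pattern used above: one
# greenthread per fabric, results gathered with ``wait()``, and all failures
# aggregated rather than aborting on the first one. ``connect`` and the fabric
# names are illustrative assumptions, not the driver's real connection code.
from eventlet import GreenPool


def connect(fabric_name):
    if fabric_name == 'bad':
        raise RuntimeError('cannot reach %s' % fabric_name)
    return 'connection-to-%s' % fabric_name


fabric_names = ['east', 'west', 'bad']
pool = GreenPool(size=len(fabric_names))

threads = {}
for name in fabric_names:
    threads[name] = pool.spawn(connect, name)

fabrics = {}
exceptions = []
for name, thread in threads.items():
    try:
        fabrics[name] = thread.wait()   # re-raises whatever connect() raised
    except Exception as e:
        exceptions.append(e)

if exceptions:
    # the original raises exception.ZoneManagerParallel(exceptions) here
    print('failed to connect to %d fabric(s)' % len(exceptions))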
Example #23
class ServiceContainer(object):
    def __init__(self, service_cls, config):

        self.service_cls = service_cls
        self.config = config

        self.service_name = get_service_name(service_cls)
        self.shared_extensions = {}

        self.max_workers = (config.get(MAX_WORKERS_CONFIG_KEY)
                            or DEFAULT_MAX_WORKERS)

        self.serializer = config.get(SERIALIZER_CONFIG_KEY, DEFAULT_SERIALIZER)

        self.accept = [self.serializer]

        self.entrypoints = SpawningSet()
        self.dependencies = SpawningSet()
        self.subextensions = SpawningSet()

        for attr_name, dependency in inspect.getmembers(
                service_cls, is_dependency):
            bound = dependency.bind(self.interface, attr_name)
            self.dependencies.add(bound)
            self.subextensions.update(iter_extensions(bound))

        for method_name, method in inspect.getmembers(service_cls, is_method):
            entrypoints = getattr(method, ENTRYPOINT_EXTENSIONS_ATTR, [])
            for entrypoint in entrypoints:
                bound = entrypoint.bind(self.interface, method_name)
                self.entrypoints.add(bound)
                self.subextensions.update(iter_extensions(bound))

        self.started = False
        self._worker_pool = GreenPool(size=self.max_workers)

        self._worker_threads = {}
        self._managed_threads = {}
        self._being_killed = False
        self._died = Event()

    @property
    def extensions(self):
        return SpawningSet(self.entrypoints | self.dependencies
                           | self.subextensions)

    @property
    def interface(self):
        """ An interface to this container for use by extensions.
        """
        return self

    def start(self):
        """ Start a container by starting all of its extensions.
        """
        _log.debug('starting %s', self)
        self.started = True

        with _log_time('started %s', self):
            self.extensions.all.setup()
            self.extensions.all.start()

    def stop(self):
        """ Stop the container gracefully.

        First all entrypoints are asked to ``stop()``.
        This ensures that no new worker threads are started.

        It is the extensions' responsibility to gracefully shut down when
        ``stop()`` is called on them and only return when they have stopped.

        After all entrypoints have stopped the container waits for any
        active workers to complete.

        After all active workers have stopped the container stops all
        dependency providers.

        At this point there should be no more managed threads. In case there
        are any managed threads, they are killed by the container.
        """
        if self._died.ready():
            _log.debug('already stopped %s', self)
            return

        if self._being_killed:
            # this race condition can happen when a container is hosted by a
            # runner and yields during its kill method; if it's unlucky in
            # scheduling the runner will try to stop() it before self._died
            # has a result
            _log.debug('already being killed %s', self)
            try:
                self._died.wait()
            except:
                pass  # don't re-raise if we died with an exception
            return

        _log.debug('stopping %s', self)

        with _log_time('stopped %s', self):

            # entrypoint have to be stopped before dependencies to ensure
            # that running workers can successfully complete
            self.entrypoints.all.stop()

            # there might still be some running workers, which we have to
            # wait for to complete before we can stop dependencies
            self._worker_pool.waitall()

            # it should be safe now to stop any dependency as there is no
            # active worker which could be using it
            self.dependencies.all.stop()

            # finally, stop remaining extensions
            self.subextensions.all.stop()

            # and kill any managed threads they spawned
            self._kill_managed_threads()

            self.started = False

            # if `kill` is called after `stop`, they race to send this
            if not self._died.ready():
                self._died.send(None)

    def kill(self, exc_info=None):
        """ Kill the container in a semi-graceful way.

        Entrypoints are killed, followed by any active worker threads.
        Next, dependencies are killed. Finally, any remaining managed threads
        are killed.

        If ``exc_info`` is provided, the exception will be raised by
        :meth:`wait`.
        """
        if self._being_killed:
            # this happens if a managed thread exits with an exception
            # while the container is being killed or if multiple errors
            # happen simultaneously
            _log.debug('already killing %s ... waiting for death', self)
            try:
                self._died.wait()
            except:
                pass  # don't re-raise if we died with an exception
            return

        self._being_killed = True

        if self._died.ready():
            _log.debug('already stopped %s', self)
            return

        if exc_info is not None:
            _log.info('killing %s due to %s', self, exc_info[1])
        else:
            _log.info('killing %s', self)

        # protect against extensions that throw during kill; the container
        # is already dying with an exception, so ignore anything else
        def safely_kill_extensions(ext_set):
            try:
                ext_set.kill()
            except Exception as exc:
                _log.warning('Extension raised `%s` during kill', exc)

        safely_kill_extensions(self.entrypoints.all)
        self._kill_worker_threads()
        safely_kill_extensions(self.extensions.all)
        self._kill_managed_threads()

        self.started = False

        # if `kill` is called after `stop`, they race to send this
        if not self._died.ready():
            self._died.send(None, exc_info)

    def wait(self):
        """ Block until the container has been stopped.

        If the container was stopped due to an exception, ``wait()`` will
        raise it.

        Any unhandled exception raised in a managed thread or in the
        worker lifecycle (e.g. inside :meth:`DependencyProvider.worker_setup`)
        results in the container being ``kill()``ed, and the exception
        raised from ``wait()``.
        """
        return self._died.wait()

    def spawn_worker(self,
                     entrypoint,
                     args,
                     kwargs,
                     context_data=None,
                     handle_result=None):
        """ Spawn a worker thread for running the service method decorated
        by `entrypoint`.

        ``args`` and ``kwargs`` are used as parameters for the service method.

        ``context_data`` is used to initialize a ``WorkerContext``.

        ``handle_result`` is an optional function which may be passed
        in by the entrypoint. It is called with the result returned
        or error raised by the service method. If provided it must return a
        value for ``result`` and ``exc_info`` to propagate to dependencies;
        these may be different to those returned by the service method.
        """

        if self._being_killed:
            _log.info("Worker spawn prevented due to being killed")
            raise ContainerBeingKilled()

        service = self.service_cls()
        worker_ctx = WorkerContext(self,
                                   service,
                                   entrypoint,
                                   args,
                                   kwargs,
                                   data=context_data)

        _log.debug('spawning %s', worker_ctx)
        gt = self._worker_pool.spawn(self._run_worker, worker_ctx,
                                     handle_result)
        gt.link(self._handle_worker_thread_exited, worker_ctx)

        self._worker_threads[worker_ctx] = gt
        return worker_ctx

    def spawn_managed_thread(self, fn, identifier=None):
        """ Spawn a managed thread to run ``fn`` on behalf of an extension.
        The passed ``identifier`` is included in logs related to this thread;
        if not given, it defaults to ``fn.__name__`` when that attribute is
        set.

        Any uncaught errors inside ``fn`` cause the container to be killed.

        It is the caller's responsibility to terminate their spawned threads.
        Threads are killed automatically if they are still running after
        all extensions are stopped during :meth:`ServiceContainer.stop`.

        Extensions should delegate all thread spawning to the container.
        """
        if identifier is None:
            identifier = getattr(fn, '__name__', "<unknown>")

        gt = eventlet.spawn(fn)
        self._managed_threads[gt] = identifier
        gt.link(self._handle_managed_thread_exited, identifier)
        return gt

    def _run_worker(self, worker_ctx, handle_result):
        _log.debug('setting up %s', worker_ctx)

        _log.debug('call stack for %s: %s', worker_ctx,
                   '->'.join(worker_ctx.call_id_stack))

        with _log_time('ran worker %s', worker_ctx):

            self._inject_dependencies(worker_ctx)
            self._worker_setup(worker_ctx)

            result = exc_info = None
            method_name = worker_ctx.entrypoint.method_name
            method = getattr(worker_ctx.service, method_name)
            try:

                _log.debug('calling handler for %s', worker_ctx)

                with _log_time('ran handler for %s', worker_ctx):
                    result = method(*worker_ctx.args, **worker_ctx.kwargs)
            except Exception as exc:
                _log.info('error handling worker %s: %s',
                          worker_ctx,
                          exc,
                          exc_info=True)
                exc_info = sys.exc_info()

            if handle_result is not None:
                _log.debug('handling result for %s', worker_ctx)

                with _log_time('handled result for %s', worker_ctx):
                    result, exc_info = handle_result(worker_ctx, result,
                                                     exc_info)

            with _log_time('tore down worker %s', worker_ctx):

                self._worker_result(worker_ctx, result, exc_info)

                # we don't need this any more, and breaking the cycle means
                # this can be reclaimed immediately, rather than waiting for a
                # gc sweep
                del exc_info

                self._worker_teardown(worker_ctx)

    def _inject_dependencies(self, worker_ctx):
        for provider in self.dependencies:
            dependency = provider.get_dependency(worker_ctx)
            setattr(worker_ctx.service, provider.attr_name, dependency)

    def _worker_setup(self, worker_ctx):
        for provider in self.dependencies:
            provider.worker_setup(worker_ctx)

    def _worker_result(self, worker_ctx, result, exc_info):
        _log.debug('signalling result for %s', worker_ctx)
        for provider in self.dependencies:
            provider.worker_result(worker_ctx, result, exc_info)

    def _worker_teardown(self, worker_ctx):
        for provider in self.dependencies:
            provider.worker_teardown(worker_ctx)

    def _kill_worker_threads(self):
        """ Kill any currently executing worker threads.

        See :meth:`ServiceContainer.spawn_worker`
        """
        num_workers = len(self._worker_threads)

        if num_workers:
            _log.warning('killing %s active worker(s)', num_workers)
            for worker_ctx, gt in list(self._worker_threads.items()):
                _log.warning('killing active worker for %s', worker_ctx)
                gt.kill()

    def _kill_managed_threads(self):
        """ Kill any currently executing managed threads.

        See :meth:`ServiceContainer.spawn_managed_thread`
        """
        num_threads = len(self._managed_threads)

        if num_threads:
            _log.warning('killing %s managed thread(s)', num_threads)
            for gt, identifier in list(self._managed_threads.items()):
                _log.warning('killing managed thread `%s`', identifier)
                gt.kill()

    def _handle_worker_thread_exited(self, gt, worker_ctx):
        self._worker_threads.pop(worker_ctx, None)
        self._handle_thread_exited(gt)

    def _handle_managed_thread_exited(self, gt, extension):
        self._managed_threads.pop(gt, None)
        self._handle_thread_exited(gt)

    def _handle_thread_exited(self, gt):
        try:
            gt.wait()

        except GreenletExit:
            # we don't care much about threads killed by the container
            # this can happen in stop() and kill() if extensions
            # don't properly take care of their threads
            _log.debug('%s thread killed by container', self)

        except Exception:
            _log.error('%s thread exited with error', self, exc_info=True)
            # any uncaught error in a thread is unexpected behavior
            # and probably a bug in the extension or container.
            # to be safe we call self.kill() to kill our dependencies and
            # provide the exception info to be raised in self.wait().
            self.kill(sys.exc_info())

    def __repr__(self):
        service_name = self.service_name
        return '<ServiceContainer [{}] at 0x{:x}>'.format(
            service_name, id(self))
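
# A standalone sketch of the mechanism behind ``wait()`` and ``kill(exc_info)``
# above: the container's ``_died`` Event either gets a plain ``send(None)`` on
# a clean stop, or ``send(None, exc_info)`` on kill, in which case every
# waiter sees the exception re-raised. The RuntimeError below is illustrative.
import sys

import eventlet
from eventlet.event import Event

died = Event()


def waiter():
    try:
        died.wait()
        print('stopped cleanly')
    except Exception as exc:
        print('container died: %s' % exc)


gt = eventlet.spawn(waiter)
eventlet.sleep(0)  # let the waiter block on the event

try:
    raise RuntimeError('managed thread exploded')
except RuntimeError:
    died.send(None, sys.exc_info())  # what kill() does with exc_info

gt.wait()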
Example #24
class Service(ConsumerMixin):
    def __init__(self,
                 controllercls,
                 connection,
                 exchange,
                 topic,
                 pool=None,
                 poolsize=1000):
        self.nodeid = UIDGEN()

        if pool is None:
            self.procpool = GreenPool(size=poolsize)
        else:
            self.procpool = pool

        self.connection = connection
        self.controller = controllercls()
        self.topic = topic
        self.greenlet = None
        self.messagesem = Semaphore()
        self.consume_ready = Event()

        node_topic = "{}.{}".format(self.topic, self.nodeid)
        self.queues = [
            entities.get_topic_queue(exchange, topic),
            entities.get_topic_queue(exchange, node_topic),
            entities.get_fanout_queue(topic),
        ]
        self._channel = None
        self._consumers = None

    def start(self):
        # self.connection = newrpc.create_connection()
        if self.greenlet is not None and not self.greenlet.dead:
            raise RuntimeError()
        self.greenlet = eventlet.spawn(self.run)

    def get_consumers(self, Consumer, channel):
        return [
            Consumer(self.queues, callbacks=[
                self.on_message,
            ]),
        ]

    def on_consume_ready(self, connection, channel, consumers, **kwargs):
        self._consumers = consumers
        self._channel = channel
        self.consume_ready.send(None)

    def on_consume_end(self, connection, channel):
        self.consume_ready.reset()

    def on_message(self, body, message):
        # need a semaphore to stop killing between message ack()
        # and spawning process.
        with self.messagesem:
            self.procpool.spawn(self.handle_request, body)
            message.ack()

    def handle_request(self, body):
        newrpc.process_message(self.connection, self.controller, body)

    def wait(self):
        try:
            self.greenlet.wait()
        except greenlet.GreenletExit:
            pass
        return self.procpool.waitall()

    def kill(self):
        if self.greenlet is not None and not self.greenlet.dead:
            self.should_stop = True
            #with self.messagesem:
            #self.greenlet.kill()
            self.greenlet.wait()
        if self._consumers:
            for c in self._consumers:
                c.cancel()
        if self._channel is not None:
            self._channel.close()

    def link(self, *args, **kwargs):
        return self.greenlet.link(*args, **kwargs)

    def kill_processes(self):
        for g in self.procpool.coroutines_running:
            g.kill()
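
# A minimal sketch of what ``kill_processes`` above relies on:
# ``GreenPool.coroutines_running`` is the live set of worker greenthreads, and
# ``kill()`` raises GreenletExit inside each of them. The ``worker`` function
# is an illustrative assumption.
import eventlet
from eventlet import GreenPool
from greenlet import GreenletExit

pool = GreenPool(5)


def worker(n):
    try:
        eventlet.sleep(60)          # pretend to do long-running work
    except GreenletExit:
        print('worker %d killed' % n)
        raise


for i in range(3):
    pool.spawn(worker, i)

eventlet.sleep(0)                   # let the workers start
for g in list(pool.coroutines_running):
    g.kill()                        # mirrors Service.kill_processes above
pool.waitall()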