Example #1
    def wsgi_request(self,
                     environ=None,
                     loop=None,
                     path=None,
                     app_handler=None,
                     urlargs=None,
                     **kw):
        '''Create a :class:`.WsgiRequest` from a wsgi ``environ`` and set the
        ``app`` attribute in the cache.
        Additional key-valued parameters can be inserted.
        '''
        if not environ:
            # No WSGI environment, build a test one
            environ = test_wsgi_environ(path=path, loop=loop, **kw)
        request = wsgi_request(environ,
                               app_handler=app_handler,
                               urlargs=urlargs)
        environ['error.handler'] = self.config['ERROR_HANDLER']
        environ['default.content_type'] = self.config['DEFAULT_CONTENT_TYPE']
        # Check if pulsar is serving the application
        if 'pulsar.cfg' not in environ:
            if not self.cfg:
                self.cfg = pulsar.Config(debug=self.debug)
            environ['pulsar.cfg'] = self.cfg

        request.cache.app = self
        return request
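A minimal sketch of how a helper like this might be exercised in a test, assuming an application object app that exposes the wsgi_request method above; the '/ping' path is purely illustrative:

# Hypothetical usage of the helper above; `app` and '/ping' are assumptions.
request = app.wsgi_request(path='/ping')
assert request.cache.app is app
assert request.environ['default.content_type'] == app.config['DEFAULT_CONTENT_TYPE']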
Example #2
 def create_parser(self, prog_name, subcommand):
     cfg = pulsar.Config(apps=['socket', 'pulse'],
                         exclude=['debug'],
                         description=self.help,
                         version=self.get_version())
     parser = cfg.parser()
     for option in self.option_list:
         flags = []
         if option._short_opts:
             flags.extend(option._short_opts)
         if option._long_opts:
             flags.extend(option._long_opts)
         type = option.type
         if type == 'choice':
             type = None
         s = Setting(option.dest,
                     flags=flags,
                     choices=option.choices,
                     default=option.default,
                     action=option.action,
                     type=type,
                     nargs=option.nargs,
                     desc=option.help)
         s.add_argument(parser)
     return parser
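For context, a minimal sketch of the underlying pattern the command above relies on, using only the pulsar.Config API shown: a config container builds an argparse-style parser from its registered settings.

import pulsar

# Standalone sketch: build a parser from a Config and parse defaults only.
cfg = pulsar.Config(apps=['socket'], description='example parser')
parser = cfg.parser()
opts = parser.parse_args([])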
Example #3
 def __init__(self):
     cfg = pulsar.Config()
     cfg.parse_command_line()
     a = pulsar.arbiter(cfg=cfg)
     self.cfg = a.cfg
     self._loop = a._loop
     self._loop.call_later(1, pulsar.async, self())
     a.start()
Example #4
 def test_dummy_proxy(self):
     p = pulsar.concurrency('thread', pulsar.Actor, pulsar.get_actor(),
                            pulsar.Config())
     self.assertEqual(p.mailbox, None)
     self.assertEqual(p.spawning_start, None)
     self.assertEqual(p.stopping_start, None)
     self.assertEqual(p.callback, None)
     self.assertEqual(str(p), 'actor(%s)' % p.aid)
Example #5
File: app.py Project: tourist/lux
 def _setup_logger(self, config, module, opts):
     debug = opts.debug or self.params.get('debug', False)
     cfg = pulsar.Config()
     cfg.set('debug', debug)
     cfg.set('loglevel', opts.loglevel)
     cfg.set('loghandlers', opts.loghandlers)
     self.debug = cfg.debug
     self.logger = cfg.configured_logger('lux')
Example #6
class TaskQueue(pulsar.Application):
    '''A :class:`.Application` for consuming :class:`.Task`.

    This application can also schedule periodic tasks when the
    :ref:`schedule_periodic <setting-schedule_periodic>` flag is ``True``.
    '''
    backend = None
    '''The :ref:`TaskBackend <apps-taskqueue-backend>` for this task queue.

    This picklable attribute is available once the :class:`TaskQueue` has
    started (when the :meth:`monitor_start` method is invoked by the
    :class:`.Monitor` running it).
    '''
    name = 'tasks'
    cfg = pulsar.Config(apps=('tasks', ), timeout=600)

    def monitor_start(self, monitor):
        '''Starts running the task queue in ``monitor``.

        It calls the :attr:`.Application.callable` (if available)
        and creates the :attr:`backend`.
        '''
        if self.callable:
            self.callable()
        self.backend = TaskBackend.make(
            self.cfg.task_backend,
            name=self.name,
            task_paths=self.cfg.task_paths,
            schedule_periodic=self.cfg.schedule_periodic,
            max_tasks=self.cfg.max_requests,
            backlog=self.cfg.concurrent_tasks)

    def monitor_task(self, monitor):
        '''Override the :meth:`.Application.monitor_task` callback.

        Check if the :attr:`backend` needs to schedule new tasks.
        '''
        if self.backend and monitor.is_running():
            if self.backend.next_run <= datetime.now():
                self.backend.tick()

    def worker_start(self, worker):
        self.backend.start(worker)

    def worker_stopping(self, worker):
        self.backend.close(worker)

    def actorparams(self, monitor, params):
        params['app'].cfg.set('schedule_periodic', False)

    def worker_info(self, worker, info=None):
        be = self.backend
        tasks = {
            'concurrent': list(be.concurrent_tasks),
            'processed': be.processed
        }
        info['tasks'] = tasks
Example #7
class PulsarShell(pulsar.Application):
    name = 'shell'
    cfg = pulsar.Config(loglevel=['none'],
                        process_name='Pulsar shell',
                        console_class=InteractiveConsole)

    def monitor_start(self, monitor):
        '''make sure :ref:`workers <setting-workers>` is set to 0,
        :ref:`thread_workers <setting-thread_workers>` is set to 1 and
        :ref:`concurrency <setting-concurrency>` is ``thread``.
        '''
        monitor.cfg.set('workers', 0)
        monitor.cfg.set('thread_workers', 1)
        monitor.cfg.set('concurrency', 'thread')

    def worker_start(self, worker, exc=None):  # pragma    nocover
        '''When the worker starts, create the :attr:`~.Actor.thread_pool`
        and send the :meth:`interact` method to it.'''
        if not exc:
            worker.executor().submit(self.start_shell, worker)

    def start_shell(self, worker):
        pulsar.help = self._show_help
        imported_objects = {
            'pshell': self,
            'pulsar': pulsar,
            'get_actor': pulsar.get_actor,
            'spawn': pulsar.spawn,
            'send': pulsar.send
        }
        imported_objects.update(worker.cfg.params.get('imported_objects', {}))
        try:  # Try activating rlcompleter, because it's handy.
            import readline
        except ImportError:  # pragma    nocover
            pass
        else:  # pragma    nocover
            import rlcompleter
            readline.set_completer(
                rlcompleter.Completer(imported_objects).complete)
            readline.parse_and_bind("tab:complete")
        self.console = self.cfg.console_class(imported_objects)
        self.console.setup()
        worker.executor().submit(self.interact, worker)

    def interact(self, worker):
        '''Handled by the :attr:`Actor.thread_pool`'''
        try:
            self.console.interact(self.cfg.timeout)
        except:
            worker._loop.stop()
        else:
            worker.executor().submit(self.interact, worker)

    def _show_help(self):
        print(_pshell_help)
Example #8
 def _setup_logger(self, config, module, opts):
     debug = opts.debug or self.params.get('debug', False)
     cfg = pulsar.Config()
     cfg.set('debug', debug)
     cfg.set('loglevel', opts.loglevel)
     cfg.set('loghandlers', opts.loghandlers)
     self.debug = cfg.debug
     if self.params.get('SETUP_LOGGER', True):
         self.logger = cfg.configured_logger('lux')
     else:
         super()._setup_logger(config, module, opts)
Example #9
class WSGIServer(SocketServer):
    '''A WSGI :class:`.SocketServer`.
    '''
    name = 'wsgi'
    cfg = pulsar.Config(apps=['socket'],
                        server_software=pulsar.SERVER_SOFTWARE)

    def protocol_factory(self):
        cfg = self.cfg
        consumer_factory = partial(HttpServerResponse, cfg.callable, cfg,
                                   cfg.server_software)
        return partial(Connection, consumer_factory)
Example #10
    def create_config(cls, params, prefix=None, name=None):
        '''Create a new :class:`pulsar.utils.config.Config` container.

        Overrides defaults with ``params``.'''
        if cls.cfg:
            cfg = cls.cfg.copy(name=name, prefix=prefix)
            # update with latest settings
            cfg.update_settings()
            cfg.update(params)
        else:
            cfg = pulsar.Config(name=name, prefix=prefix, **params)
        return cfg
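A minimal sketch of calling this classmethod, assuming a hypothetical Application subclass MyApp that defines a class-level cfg:

# Hypothetical usage: MyApp is assumed to subclass the class defining
# create_config and to carry a class-level pulsar.Config as `cfg`.
cfg = MyApp.create_config({'workers': 2, 'timeout': 30}, name='myapp')
assert cfg.workers == 2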
Example #11
class server(pulsar.MultiApp):
    '''Build a multi-app consisting of

    * The :class:`.DiningPhilosophers` application
    * A wsgi server for displaying actions on the browser
    '''
    cfg = pulsar.Config('Dining philosophers sit at a table around a bowl of '
                        'spaghetti and wait for available forks.',
                        data_store=ds.pulsards_url())

    def build(self):
        yield self.new_app(DiningPhilosophers)
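A minimal sketch of launching the multi-app, following the usual pulsar script entry point:

if __name__ == '__main__':
    server().start()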
Example #12
def _spawn_actor(cls, monitor, cfg=None, name=None, aid=None, **kw):
    # Internal function which spawns a new Actor and return its
    # ActorProxyMonitor.
    # *cls* is the Actor class
    # *monitor* can be either the arbiter or a monitor
    kind = None
    if issubclass(cls, PoolMixin):
        kind = 'monitor'
    if cfg is None:
        if monitor:
            cfg = monitor.cfg.copy()
        else:
            cfg = pulsar.Config()
    if monitor:
        params = monitor.actorparams()
        name = params.pop('name', name)
        aid = params.pop('aid', aid)
    else:  # monitor not available, this is the arbiter
        if kind != 'monitor':
            raise TypeError('class %s not a valid monitor' % cls)
        kind = 'arbiter'
        params = {}
    for key, value in iteritems(kw):
        if key in cfg.settings:
            cfg.set(key, value)
        else:
            params[key] = value
    #
    if monitor:
        if not kind:
            if not issubclass(cls, Actor):
                raise TypeError('Class %s not a valid actor.' % cls)
            kind = cfg.concurrency
    if not kind:
        raise TypeError('Cannot spawn class %s. not a valid concurrency.' %
                        cls)
    actor_proxy = concurrency(kind,
                              cls,
                              monitor,
                              cfg,
                              name=name,
                              aid=aid,
                              **params)
    # Add to the list of managed actors if this is a remote actor
    if isinstance(actor_proxy, Actor):
        return actor_proxy
    else:
        actor_proxy.monitor = monitor
        monitor.managed_actors[actor_proxy.aid] = actor_proxy
        deferred = proxy.ActorProxyDeferred(actor_proxy)
        actor_proxy.start()
        return deferred
Example #13
class server(pulsar.MultiApp):
    '''Build a multi-app consisting of a taskqueue and a JSON-RPC server.

    This class shows how to
    use :class:`pulsar.apps.MultiApp` utility for starting several
    :ref:`pulsar applications <apps-framework>` at once.
    '''
    cfg = pulsar.Config('Taskqueue with JSON-RPC API example')

    def build(self):
        yield self.new_app(tasks.TaskQueue, task_paths=TASK_PATHS)
        yield self.new_app(wsgi.WSGIServer, prefix='rpc',
                           callable=Rpc(self.name))
Example #14
 def handle(self, *args, **options):
     if args:
         raise CommandError('pulse --help for usage')
     app_name = options.get('pulse_app_name')
     callable = Wsgi()
     if options.pop('dryrun', False) is True:  # used for testing
         return callable
     # callable.setup()
     cfg = pulsar.Config(apps=['socket', 'pulse'],
                         server_software=pulsar.SERVER_SOFTWARE,
                         **options)
     server = WSGIServer(callable=callable, name=app_name, cfg=cfg,
                         parse_console=False)
     callable.cfg = server.cfg
     server.start()
Example #15
class WSGIServer(SocketServer):
    '''A WSGI :class:`.SocketServer`.
    '''
    name = 'wsgi'
    cfg = pulsar.Config(apps=['socket', 'wsgi'],
                        server_software=pulsar.SERVER_SOFTWARE)

    def protocol_consumer(self):
        '''Build the :class:`.ProtocolConsumer` factory.

        It uses the :class:`pulsar.apps.wsgi.server.HttpServerResponse`
        protocol consumer and the wsgi callable provided as parameter during
        initialisation.'''
        c = self.cfg
        return partial(HttpServerResponse, self.callable, c, c.server_software)
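For context, a minimal sketch of serving a plain WSGI callable with this application class; the hello handler below is illustrative only.

def hello(environ, start_response):
    '''A trivial WSGI callable returning plain text.'''
    data = b'Hello World!\n'
    start_response('200 OK', [('Content-Type', 'text/plain'),
                              ('Content-Length', str(len(data)))])
    return [data]


if __name__ == '__main__':
    WSGIServer(callable=hello).start()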
Example #16
class PulsarShell(pulsar.Application):
    name = 'shell'
    cfg = pulsar.Config(loglevel='none',
                        process_name='Pulsar shell',
                        console_class=InteractiveConsole)

    def monitor_start(self, monitor):
        monitor.cfg.set('workers', 1)
        monitor.cfg.set('concurrency', 'thread')

    def worker_start(self, worker):  # pragma    nocover
        '''When the worker starts, create the :attr:`Actor.thread_pool`
with one thread only and send the :meth:`interact` method to it.'''
        worker.create_thread_pool()
        worker.thread_pool.apply(self.start_shell, worker)

    def start_shell(self, worker):
        imported_objects = {
            'pshell': self,
            'pulsar': pulsar,
            'get_actor': pulsar.get_actor,
            'spawn': pulsar.spawn,
            'send': pulsar.send,
            'Actor': pulsar.Actor
        }
        try:  # Try activating rlcompleter, because it's handy.
            import readline
        except ImportError:  # pragma    nocover
            pass
        else:  # pragma    nocover
            import rlcompleter
            readline.set_completer(
                rlcompleter.Completer(imported_objects).complete)
            readline.parse_and_bind("tab:complete")
        self.local.console = self.cfg.console_class(imported_objects)
        self.local.console.setup()
        worker.thread_pool.apply(self.interact, worker)

    def interact(self, worker):
        '''Handled by the :attr:`Actor.thread_pool`'''
        try:
            self.local.console.interact(self.cfg.timeout)
            worker.thread_pool.apply(self.interact, worker)
        except:
            worker.send('arbiter', 'stop')
Example #17
class Bench(pulsar.Application):
    cfg = pulsar.Config(apps=['bench'])

    def monitor_start(self, monitor, exc=None):
        if monitor.cfg.filldb:
            self.pool = GreenPool()
            try:
                yield from self.pool.submit(self.filldb)
            finally:
                monitor._loop.stop()
        else:
            monitor.ready = 0

    def worker_start(self, worker, exc=None):
        if not exc:
            worker._loop.call_later(1, async, wormup(worker, POOL_SIZES[0]))

    def filldb(self):
        '''Fill database
        '''
        from app import World, Fortune, odm, MAXINT

        mapper = odm.Mapper(self.cfg.postgresql)
        mapper.register(World)
        mapper.register(Fortune)
        mapper.table_create()

        with mapper.begin() as session:
            query = session.query(mapper.world)
            N = query.count()
            todo = max(0, MAXINT - N)
            if todo:
                for _ in range(todo):
                    world = mapper.world(randomNumber=randint(1, MAXINT))
                    session.add(world)

        if todo:
            odm.logger.info('Created %d World models', todo)
        else:
            odm.logger.info('%d World models already available', N)
Example #18
 def __new__(cls, name, bases, attrs):
     settings = {}
     for base in bases:
         if isinstance(base, TestPluginMeta):
             settings.update(base.config.settings)
     for key, setting in list(iteritems(attrs)):
         if isinstance(setting, pulsar.Setting):
             attrs.pop(key)
             setting.name = setting.name or key.lower()
             settings[setting.name] = as_test_setting(setting)
     if not attrs.pop('virtual', False):
         setting_name = attrs.pop('name', name).lower()
         if setting_name:
             def_flag = '--%s' % setting_name.replace(' ', '-').replace(
                 '_', '-')
             action = attrs.pop('action', None)
             type = attrs.pop('type', None)
             default = attrs.pop('default', None)
             validator = attrs.pop('validator', None)
             nargs = attrs.pop('nargs', None)
             if (validator is None and default is None and type is None
                     and nargs is None):
                 if action is None or action == 'store_true':
                     action = 'store_true'
                     default = False
                     validator = pulsar.validate_bool
                 elif action == 'store_false':
                     default = True
                     validator = pulsar.validate_bool
             setting = pulsar.Setting(name=setting_name,
                                      desc=attrs.pop('desc', name),
                                      type=type,
                                      flags=attrs.pop('flags', [def_flag]),
                                      action=action,
                                      default=default,
                                      validator=validator,
                                      nargs=nargs)
             settings[setting.name] = as_test_setting(setting)
     attrs['config'] = pulsar.Config(settings=settings)
     return super(TestPluginMeta, cls).__new__(cls, name, bases, attrs)
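A minimal sketch of a plugin class processed by this metaclass, assuming a TestPlugin base class whose metaclass is TestPluginMeta; per the __new__ logic above, the class below contributes a '--show-timing' flag defaulting to False.

# Hypothetical plugin: TestPlugin is assumed to use TestPluginMeta as its
# metaclass. With no type/default/validator/nargs given, __new__ above turns
# this into a store_true setting with default False and flag --show-timing.
class ShowTiming(TestPlugin):
    name = 'show_timing'
    desc = 'Display timing information for each test'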
Example #19
class AgileManager(pulsar.Application):
    name = 'agile'
    cfg = pulsar.Config(apps=['agile'],
                        log_level=['pulsar.error', 'info'],
                        log_handlers=['console_name_level_message'],
                        description='Agile release manager',
                        exclude=exclude)

    def monitor_start(self, monitor, exc=None):
        self.cfg.set('workers', 0)

    async def worker_start(self, worker, exc=None):
        if not exc:
            try:
                print(self.script)
                executor = await self.executor(loop=worker._loop)
            except Exception:
                self.logger.exception('Could not initialise')
                worker._loop.call_soon(self.done, 2)
            else:
                if executor.cfg.list_commands:
                    executed = executor.list_commands()
                elif executor.cfg.environ:
                    executed = executor.show_environ()
                else:
                    executed = executor.run()
                fut = ensure_future(executed, loop=worker._loop)
                fut.add_done_callback(self._exit)

    def executor(self, **kw):
        return core.CommandExecutor.create(self.cfg, **kw)

    def done(self, exit_code=None):
        exit_code = exit_code or 0
        if exit_code < 3:
            raise HaltServer(exit_code=exit_code)

    def _exit(self, fut):
        self.done(exit_code=fut.result())
Example #20
class DiningPhilosophers(pulsar.Application):
    description = ('Dining philosophers sit at a table around a bowl of '
                   'spaghetti and wait for available forks.')
    cfg = pulsar.Config(workers=5)

    def monitor_start(self, monitor):
        self.not_available_forks = set()

    def worker_start(self, philosopher, exc=None):
        self._loop = philosopher._loop
        self.eaten = 0
        self.thinking = 0
        self.started_waiting = 0
        self.forks = []
        philosopher._loop.call_soon(self.take_action, philosopher)

    def worker_info(self, philosopher, info=None):
        '''Override :meth:`~.Application.worker_info` to provide
        information about the philosopher.'''
        info['philosopher'] = {
            'number': philosopher.number,
            'eaten': self.eaten
        }

    def take_action(self, philosopher):
        '''The ``philosopher`` performs one of these two actions:

        * eat, if it has both forks, and then :meth:`release_forks`.
        * try to :meth:`pickup_fork`, if it has less than 2 forks.
        '''
        loop = philosopher._loop
        forks = self.forks
        if forks:
            #
            # Two forks. Eat!
            if len(forks) == 2:
                self.thinking = 0
                self.eaten += 1
                philosopher.logger.info("eating... So far %s times",
                                        self.eaten)
                eat_time = 2 * self.cfg.eating_period * random.random()
                return loop.call_later(eat_time, self.release_forks,
                                       philosopher)
            #
            # One fork only! release fork or try to pick up one
            elif len(forks) == 1:
                waiting_period = 2 * self.cfg.waiting_period * random.random()
                if self.started_waiting == 0:
                    self.started_waiting = loop.time()
                elif loop.time() - self.started_waiting > waiting_period:
                    philosopher.logger.debug("tired of waiting")
                    return self.release_forks(philosopher)
            #
            # this should never happen
            elif len(forks) > 2:  # pragma    nocover
                philosopher.logger.critical('more than 2 forks!!!')
                return self.release_forks(philosopher)
        else:
            if not self.thinking:
                philosopher.logger.warning('%s thinking...', philosopher.name)
            self.thinking += 1
        self.pickup_fork(philosopher)

    @task
    def pickup_fork(self, philosopher):
        '''The philosopher has less than two forks.

        Check if forks are available.
        '''
        fork = yield from philosopher.send(philosopher.monitor, 'pickup_fork',
                                           philosopher.number)
        if fork:
            forks = self.forks
            if fork in forks:
                philosopher.logger.error('Got fork %s. I already have it',
                                         fork)
            else:
                philosopher.logger.debug('Got fork %s.', fork)
                forks.append(fork)
        philosopher._loop.call_soon(self.take_action, philosopher)

    def release_forks(self, philosopher):
        '''The ``philosopher`` has just eaten and is ready to release both
        forks.

        This method releases them, one by one, by sending the ``put_down``
        action to the monitor.
        '''
        forks = self.forks
        self.forks = []
        self.started_waiting = 0
        for fork in forks:
            philosopher.logger.debug('Putting down fork %s', fork)
            philosopher.send('monitor', 'putdown_fork', fork)
        philosopher._loop.call_later(self.cfg.waiting_period, self.take_action,
                                     philosopher)

    def actorparams(self, monitor, params):
        avail = set(range(1, monitor.cfg.workers + 1))
        for philosopher in monitor.managed_actors.values():
            info = philosopher.info
            if info:
                avail.discard(info['philosopher']['number'])
            else:
                avail = None
                break
        number = min(avail) if avail else len(monitor.managed_actors) + 1
        params.update({'name': 'Philosopher %s' % number, 'number': number})
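A minimal sketch of running this example as a script, using the standard pulsar entry point:

if __name__ == '__main__':
    DiningPhilosophers().start()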
Example #21
class SocketServer(pulsar.Application):
    '''A :class:`pulsar.apps.Application` which serves applications on a socket.

    It binds a socket to a given address and listens for requests. The request
    handler is constructed from the callable passed during initialisation.

    .. attribute:: address

        The socket address, available once the application has started.
    '''
    name = 'socket'
    address = None
    cfg = pulsar.Config(apps=['socket'])

    def protocol_consumer(self):
        '''Factory of :class:`pulsar.ProtocolConsumer` used by the server.

        By default it returns the :attr:`pulsar.apps.Application.callable`
        attribute.'''
        return self.callable

    def monitor_start(self, monitor):
        '''Create the socket listening to the ``bind`` address.

        If the platform does not support multiprocessing sockets set the
        number of workers to 0.
        '''
        cfg = self.cfg
        loop = monitor.event_loop
        if (not pulsar.platform.has_multiProcessSocket
                or cfg.concurrency == 'thread'):
            cfg.set('workers', 0)
        if not cfg.address:
            raise pulsar.ImproperlyConfigured('Could not open a socket. '
                                              'No address to bind to')
        ssl = None
        if cfg.cert_file or cfg.key_file:
            if cfg.cert_file and not os.path.exists(cfg.cert_file):
                raise ValueError('cert_file "%s" does not exist' %
                                 cfg.cert_file)
            if cfg.key_file and not os.path.exists(cfg.key_file):
                raise ValueError('key_file "%s" does not exist' % cfg.key_file)
            ssl = SSLContext(keyfile=cfg.key_file, certfile=cfg.cert_file)
        address = parse_address(self.cfg.address)
        # First create the sockets
        sockets = yield loop.start_serving(lambda: None, *address)
        addresses = []
        for sock in sockets:
            assert loop.remove_reader(
                sock.fileno()), ("Could not remove reader")
            addresses.append(sock.getsockname())
        monitor.params.sockets = [WrapSocket(s) for s in sockets]
        monitor.params.ssl = ssl
        self.addresses = addresses
        self.address = addresses[0]

    def worker_start(self, worker):
        '''Start the worker by invoking the :meth:`create_server` method.'''
        worker.servers[self.name] = servers = []
        for sock in worker.params.sockets:
            server = self.create_server(worker, sock.sock)
            servers.append(server)

    def worker_stopping(self, worker):
        all = []
        for server in worker.servers[self.name]:
            all.append(server.close_connections())
        return multi_async(all)

    def worker_info(self, worker, info):
        info['sockets'] = sockets = []
        for server in worker.servers.get(self.name, ()):
            address = format_address(server.address)
            sockets.append({
                'address': format_address(server.address),
                'read_timeout': server.timeout,
                'concurrent_connections': server.concurrent_connections,
                'received_connections': server.received
            })

    #   INTERNALS

    def create_server(self, worker, sock, ssl=None):
        '''Create the Server Protocol which will listen for requests. It
uses the :meth:`protocol_consumer` method as the protocol consumer factory.'''
        cfg = self.cfg
        server = TcpServer(worker.event_loop,
                           sock=sock,
                           consumer_factory=self.protocol_consumer(),
                           max_connections=cfg.max_requests,
                           timeout=cfg.keep_alive,
                           name=self.name)
        for event in ('connection_made', 'pre_request', 'post_request',
                      'connection_lost'):
            callback = getattr(cfg, event)
            if callback != pass_through:
                server.bind_event(event, callback)
        server.start_serving(cfg.backlog, sslcontext=worker.params.ssl)
        return server
Example #22
import pulsar
from pulsar import validate_list, ensure_future, HaltServer

from . import core
from . import actions  # noqa
from . import plugins  # noqa

exclude = set(pulsar.Config().settings)
exclude.difference_update(('config', 'log_level', 'log_handlers', 'debug'))


class Tasks(core.AgileSetting):
    name = 'tasks'
    nargs = '*'
    validator = validate_list
    default = []
    desc = "tasks to run - For the list of tasks pass -l or --list-tasks"


class ConfigFile(core.AgileSetting):
    name = "config_file"
    flags = ["--config-file"]
    default = "agile.json"
    desc = """\
        Configuration file
        """


class ListTasks(core.AgileSetting):
    name = "list_commands"
    flags = ['-l', '--list-commands']
Example #23
class TestSuite(pulsar.Application):
    '''An asynchronous test suite which works like a task queue.

    Each task is a group of test methods in a python TestCase class.

    :parameter modules: An iterable over modules where to look for tests.
        Alternatively it can be a callable returning the iterable over modules.
        For example::

            suite = TestSuite(modules=('regression',
                                       ('examples','tests'),
                                       ('apps','test_*')))

            def get_modules(suite):
                ...

            suite = TestSuite(modules=get_modules)

        If not provided it defaults to ``["tests"]``, which loads all
        python modules from the ``tests`` module in a recursive fashion.
        Check the :class:`.TestLoader` for detailed information.

    :parameter result_class: Optional class for collecting test results.
        By default it uses the standard :class:`.TestResult`.
    :parameter plugins: Optional list of :class:`.TestPlugin` instances.
    '''
    name = 'test'
    cfg = pulsar.Config(apps=['test'], loglevel=['none'], plugins=())

    def new_runner(self):
        '''The :class:`.TestRunner` driving test cases.
        '''
        result_class = getattr(self, 'result_class', None)
        stream = pulsar.get_stream(self.cfg)
        runner = TestRunner(self.cfg.plugins, stream, result_class)
        abort_message = runner.configure(self.cfg)
        if abort_message:  # pragma    nocover
            raise ExitTest(str(abort_message))
        self.runner = runner
        return runner

    @lazyproperty
    def loader(self):
        # When config is available load the tests and check what type of
        # action is required.
        modules = self.cfg.get('modules')
        # Create a runner and configure it
        runner = self.new_runner()
        if not modules:
            modules = ['tests']
        if hasattr(modules, '__call__'):
            modules = modules(self)
        return TestLoader(self.root_dir, modules, runner, logger=self.logger)

    def on_config(self, arbiter):
        stream = arbiter.stream
        try:
            loader = self.loader
        except ExitTest as e:
            stream.writeln(str(e))
            return False
        stream = arbiter.stream
        stream.writeln(sys.version)
        if self.cfg.list_labels:  # pragma    nocover
            tags = self.cfg.labels
            if tags:
                s = '' if len(tags) == 1 else 's'
                stream.writeln('\nTest labels for%s %s:' %
                               (s, ', '.join(tags)))
            else:
                stream.writeln('\nAll test labels:')
            stream.writeln('')

            def _tags():
                for tag, mod in loader.testmodules(tags):
                    doc = mod.__doc__
                    if doc:
                        tag = '{0} - {1}'.format(tag, doc)
                    yield tag

            for tag in sorted(_tags()):
                stream.writeln(tag)
            stream.writeln('')
            return False

    def monitor_start(self, monitor):
        '''When the monitor starts load all test classes into the queue'''
        cfg = self.cfg
        workers = min(0, cfg.workers)
        cfg.set('workers', workers)
        loader = self.loader

        tags = self.cfg.labels
        exclude_tags = self.cfg.exclude_labels
        if self.cfg.show_leaks:
            show = show_leaks if self.cfg.show_leaks == 1 else hide_leaks
            self.cfg.set('when_exit', show)
            arbiter = pulsar.arbiter()
            arbiter.cfg.set('when_exit', show)
        try:
            tests = []
            loader.runner.on_start()
            for tag, testcls in loader.testclasses(tags, exclude_tags):
                suite = loader.runner.loadTestsFromTestCase(testcls)
                if suite and suite._tests:
                    tests.append((tag, testcls))
            self._time_start = None
            if tests:
                self.logger.info('loading %s test classes', len(tests))
                monitor._loop.call_soon(Runner, monitor, loader.runner, tests)
            else:  # pragma    nocover
                raise ExitTest('Could not find any tests.')
        except ExitTest as e:  # pragma    nocover
            monitor.stream.writeln(str(e))
            monitor._loop.stop()
        except Exception:  # pragma    nocover
            monitor.logger.critical('Error occurred while starting tests',
                                    exc_info=True)
            monitor._loop.call_soon(self._exit, 3)

    @classmethod
    def create_config(cls, *args, **kwargs):
        cfg = super(TestSuite, cls).create_config(*args, **kwargs)
        if cfg.params.get('plugins') is None:
            cfg.params['plugins'] = ()
        for plugin in cfg.params['plugins']:
            cfg.settings.update(plugin.config.settings)
        return cfg

    def arbiter_params(self):
        params = super(TestSuite, self).arbiter_params()
        params['concurrency'] = self.cfg.concurrency
        return params
Example #24
class TestSuite(tasks.TaskQueue):
    '''An asynchronous test suite which works like a task queue.

    Each task is a group of test methods in a python TestCase class.

    :parameter modules: An iterable over modules where to look for tests.
        Alternatively it can be a callable returning the iterable over modules.
        For example::

            suite = TestSuite(modules=('regression',
                                       ('examples','tests'),
                                       ('apps','test_*')))

            def get_modules(suite):
                ...

            suite = TestSuite(modules=get_modules)

        If not provided it defaults to ``["tests"]``, which loads all
        python modules from the ``tests`` module in a recursive fashion.
        Check the :class:`.TestLoader` for detailed information.

    :parameter result_class: Optional class for collecting test results.
        By default it uses the standard :class:`.TestResult`.
    :parameter plugins: Optional list of :class:`.TestPlugin` instances.
    '''
    name = 'test'
    cfg = pulsar.Config(apps=('tasks', 'test'),
                        loglevel=['none'],
                        task_paths=['pulsar.apps.test.case'],
                        plugins=())

    def new_runner(self):
        '''The :class:`.TestRunner` driving test cases.
        '''
        if mock is None:  # pragma    nocover
            raise ExitTest('python %s requires mock library for pulsar '
                           'test suite application' % pyver)
        result_class = getattr(self, 'result_class', None)
        stream = pulsar.get_stream(self.cfg)
        runner = TestRunner(self.cfg.plugins, stream, result_class)
        abort_message = runner.configure(self.cfg)
        if abort_message:  # pragma    nocover
            raise ExitTest(str(abort_message))
        self.runner = runner
        return runner

    @lazyproperty
    def loader(self):
        # When config is available load the tests and check what type of
        # action is required.
        modules = self.cfg.get('modules')
        # Create a runner and configure it
        runner = self.new_runner()
        if not modules:
            modules = ['tests']
        if hasattr(modules, '__call__'):
            modules = modules(self)
        return TestLoader(self.root_dir, modules, runner, logger=self.logger)

    def on_config(self, arbiter):
        stream = arbiter.stream
        try:
            loader = self.loader
        except ExitTest as e:
            stream.writeln(str(e))
            return False
        stream = arbiter.stream
        stream.writeln(sys.version)
        if self.cfg.list_labels:  # pragma    nocover
            tags = self.cfg.labels
            if tags:
                s = '' if len(tags) == 1 else 's'
                stream.writeln('\nTest labels for%s %s:' %
                               (s, ', '.join(tags)))
            else:
                stream.writeln('\nAll test labels:')
            stream.writeln('')

            def _tags():
                for tag, mod in loader.testmodules(tags):
                    doc = mod.__doc__
                    if doc:
                        tag = '{0} - {1}'.format(tag, doc)
                    yield tag

            for tag in sorted(_tags()):
                stream.writeln(tag)
            stream.writeln('')
            return False
        elif self.cfg.pep8:
            msg, code = pep8_run(self.cfg.pep8)
            stream.writeln(msg)
            if code:
                sys.exit(code)
            return False

    @task
    def monitor_start(self, monitor):
        '''When the monitor starts load all test classes into the queue'''
        # Create a datastore for this test suite
        if not self.cfg.task_backend:
            server = PulsarDS(bind='127.0.0.1:0',
                              workers=0,
                              key_value_save=[],
                              name='%s_store' % self.name)
            yield server()
            address = 'pulsar://%s:%s' % server.cfg.addresses[0]
        else:
            address = self.cfg.task_backend

        store = create_store(address, pool_size=2, loop=monitor._loop)
        self.get_backend(store)
        loader = self.loader
        tags = self.cfg.labels
        exclude_tags = self.cfg.exclude_labels
        if self.cfg.show_leaks:
            show = show_leaks if self.cfg.show_leaks == 1 else hide_leaks
            self.cfg.set('when_exit', show)
            arbiter = pulsar.arbiter()
            arbiter.cfg.set('when_exit', show)
        try:
            tests = []
            loader.runner.on_start()
            for tag, testcls in loader.testclasses(tags, exclude_tags):
                suite = loader.runner.loadTestsFromTestCase(testcls)
                if suite and suite._tests:
                    tests.append((tag, testcls))
            self._time_start = None
            if tests:
                self.logger.info('loading %s test classes', len(tests))
                monitor.cfg.set('workers', min(self.cfg.workers, len(tests)))
                self._time_start = default_timer()
                queued = []
                self._tests_done = set()
                self._tests_queued = None
                #
                # Bind to the task_done event
                self.backend.bind_event('task_done',
                                        partial(self._test_done, monitor))
                for tag, testcls in tests:
                    r = self.backend.queue_task('test',
                                                testcls=testcls,
                                                tag=tag)
                    queued.append(r)
                queued = yield multi_async(queued)
                self.logger.debug('loaded %s test classes', len(tests))
                self._tests_queued = set(queued)
                yield self._test_done(monitor)
            else:  # pragma    nocover
                raise ExitTest('Could not find any tests.')
        except ExitTest as e:  # pragma    nocover
            monitor.stream.writeln(str(e))
            monitor._loop.stop()
        except Exception:  # pragma    nocover
            monitor.logger.critical('Error occurred while starting tests',
                                    exc_info=True)
            monitor._loop.call_soon(self._exit, 3)

    @classmethod
    def create_config(cls, *args, **kwargs):
        cfg = super(TestSuite, cls).create_config(*args, **kwargs)
        if cfg.params.get('plugins') is None:
            cfg.params['plugins'] = ()
        for plugin in cfg.params['plugins']:
            cfg.settings.update(plugin.config.settings)
        return cfg

    def arbiter_params(self):
        params = super(TestSuite, self).arbiter_params()
        params['concurrency'] = self.cfg.concurrency
        return params

    @task
    def _test_done(self, monitor, task_id=None, exc=None):
        runner = self.runner
        if task_id:
            self._tests_done.add(to_string(task_id))
        if self._tests_queued is not None:
            left = self._tests_queued.difference(self._tests_done)
            if not left:
                tests = yield self.backend.get_tasks(self._tests_done)
                self.logger.info('All tests have finished.')
                time_taken = default_timer() - self._time_start
                for task in tests:
                    runner.add(task.get('result'))
                runner.on_end()
                runner.printSummary(time_taken)
                # Shut down the arbiter
                if runner.result.errors or runner.result.failures:
                    exit_code = 2
                else:
                    exit_code = 0
                monitor._loop.call_soon(self._exit, exit_code)

    def _exit(self, exit_code):
        raise pulsar.HaltServer(exit_code=exit_code)
Example #25
class DiningPhilosophers(pulsar.Application):
    description = ('Dining philosophers sit at a table around a bowl of '
                   'spaghetti and wait for available forks.')
    cfg = pulsar.Config(workers=5)

    def monitor_start(self, monitor):
        self.not_available_forks = set()

    def worker_start(self, philosopher):
        self.take_action(philosopher)

    def worker_info(self, philosopher, info=None):
        '''Override :meth:`pulsar.Application.worker_info` to provide
information about the philosopher.'''
        params = philosopher.params
        info['philosopher'] = {'number': params.number, 'eaten': params.eaten}

    def take_action(self, philosopher):
        '''The ``philosopher`` performs one of these two actions:

* eat, if it has both forks, and then :meth:`release_forks`.
* try to :meth:`pickup_fork`, if it has less than 2 forks.
'''
        params = philosopher.params
        eaten = params.eaten or 0
        forks = params.forks
        started_waiting = params.started_waiting or 0
        pick_up_fork = True
        if forks:
            max_eat_period = 2 * self.cfg.eating_period
            # Two forks. Eat!
            if len(forks) == 2:
                params.thinking = 0
                eaten += 1
                philosopher.logger.info("%s eating... So far %s times",
                                        philosopher.name, eaten)
                try:
                    time.sleep(max_eat_period * random.random())
                except IOError:
                    pass
                params.eaten = eaten
                pick_up_fork = False
            # One fork only! release fork or try to pick up one
            elif len(forks) == 1:
                waiting_period = 2 * self.cfg.waiting_period * random.random()
                if started_waiting == 0:
                    params.started_waiting = time.time()
                elif time.time() - started_waiting > waiting_period:
                    pick_up_fork = False
            elif len(forks) > 2:
                philosopher.logger.critical('%s has more than 2 forks!!!',
                                            philosopher.name)
                pick_up_fork = False
        else:
            thinking = params.thinking or 0
            if not thinking:
                philosopher.logger.warning('%s thinking...', philosopher.name)
            params.thinking = thinking + 1
        # Take action
        if pick_up_fork:
            self.pickup_fork(philosopher)
        else:
            self.release_forks(philosopher)

    def pickup_fork(self, philosopher):
        '''The philosopher has less than two forks. Check if forks are
available.'''
        right_fork = philosopher.params.number
        return philosopher.send(philosopher.monitor, 'pickup_fork',
                                right_fork).add_callback(
                                    partial(self._continue, philosopher))

    def release_forks(self, philosopher):
        '''The ``philosopher`` has just eaten and is ready to release both
forks. This method releases them, one by one, by sending the ``put_down``
action to the monitor.'''
        forks = philosopher.params.forks
        philosopher.params.forks = []
        philosopher.params.started_waiting = 0
        for fork in forks:
            philosopher.logger.debug('Putting down fork %s', fork)
            philosopher.send('monitor', 'putdown_fork', fork)
        # once released all the forks wait for a moment
        time.sleep(self.cfg.waiting_period)
        self._continue(philosopher, None)  # no new fork was picked up

    def _continue(self, philosopher, fork):
        if fork:
            forks = philosopher.params.forks
            if fork in forks:
                philosopher.logger.error('Got fork %s. I already have it',
                                         fork)
            else:
                philosopher.logger.debug('Got fork %s.', fork)
                forks.append(fork)
        self.take_action(philosopher)

    def actorparams(self, monitor, params):
        avail = set(range(1, monitor.cfg.workers + 1))
        for philosopher in monitor.managed_actors.values():
            info = philosopher.info
            if info:
                avail.discard(info['philosopher']['number'])
            else:
                avail = None
                break
        number = min(avail) if avail else len(monitor.managed_actors) + 1
        name = 'Philosopher %s' % number
        params.update({'name': name, 'number': number, 'forks': []})
Example #26
class UdpSocketServer(SocketServer):
    '''A :class:`.SocketServer` which serves applications on UDP sockets.

    It binds a socket to a given address and listens for requests. The request
    handler is constructed from the callable passed during initialisation.

    .. attribute:: address

        The socket address, available once the application has started.
    '''
    name = 'udpsocket'
    cfg = pulsar.Config(apps=['socket'])

    def protocol_factory(self):
        '''Return the :class:`.DatagramProtocol` factory.
        '''
        return self.cfg.callable

    async def monitor_start(self, monitor):
        '''Create the socket listening to the ``bind`` address.

        If the platform does not support multiprocessing sockets set the
        number of workers to 0.
        '''
        cfg = self.cfg
        loop = monitor._loop
        if (not pulsar.platform.has_multiProcessSocket
                or cfg.concurrency == 'thread'):
            cfg.set('workers', 0)
        if not cfg.address:
            raise pulsar.ImproperlyConfigured('Could not open a socket. '
                                              'No address to bind to')
        address = parse_address(self.cfg.address)
        # First create the sockets
        transport, _ = await loop.create_datagram_endpoint(
            asyncio.DatagramProtocol, address)
        sock = transport.get_extra_info('socket')
        transport._sock = DummySock()
        transport.close()
        self.monitor_sockets(monitor, [sock])

    def actorparams(self, monitor, params):
        params.update({'sockets': monitor.sockets})

    def server_factory(self, *args, **kw):
        '''By default returns a new :class:`.DatagramServer`.
        '''
        return DatagramServer(*args, **kw)

    #   INTERNALS
    async def create_server(self, worker):
        '''Create the Server which will listen for requests.

        :return: the server obtained from :meth:`server_factory`.
        '''
        cfg = self.cfg
        max_requests = cfg.max_requests
        if max_requests:
            max_requests = int(lognormvariate(log(max_requests), 0.2))
        server = self.server_factory(self.protocol_factory(),
                                     worker._loop,
                                     sockets=worker.sockets,
                                     max_requests=max_requests,
                                     name=self.name,
                                     logger=self.logger)
        server.bind_event('stop', lambda _, **kw: worker.stop())
        for event in ('pre_request', 'post_request'):
            callback = getattr(cfg, event)
            if callback != pass_through:
                server.bind_event(event, callback)
        await server.create_endpoint()
        return server
Example #27
class SocketServer(pulsar.Application):
    '''A :class:`.Application` which serves applications on a socket.

    It binds a socket to a given address and listens for requests. The request
    handler is constructed from the callable passed during initialisation.

    .. attribute:: address

        The socket address, available once the application has started.
    '''
    name = 'socket'
    cfg = pulsar.Config(apps=['socket'])

    def protocol_factory(self):
        '''Factory of :class:`.ProtocolConsumer` used by the server.

        By default it returns the :meth:`.Application.callable`.
        '''
        return partial(Connection, self.cfg.callable)

    async def monitor_start(self, monitor):
        '''Create the socket listening to the ``bind`` address.

        If the platform does not support multiprocessing sockets set the
        number of workers to 0.
        '''
        cfg = self.cfg
        loop = monitor._loop
        if (not pulsar.platform.has_multiProcessSocket
                or cfg.concurrency == 'thread'):
            cfg.set('workers', 0)
        if not cfg.address:
            raise ImproperlyConfigured('Could not open a socket. '
                                       'No address to bind to')
        address = parse_address(self.cfg.address)
        if cfg.cert_file or cfg.key_file:
            if not ssl:
                raise RuntimeError('No support for ssl')
            if cfg.cert_file and not os.path.exists(cfg.cert_file):
                raise ImproperlyConfigured('cert_file "%s" does not exist' %
                                           cfg.cert_file)
            if cfg.key_file and not os.path.exists(cfg.key_file):
                raise ImproperlyConfigured('key_file "%s" does not exist' %
                                           cfg.key_file)
        # First create the sockets
        try:
            server = await loop.create_server(asyncio.Protocol, *address)
        except socket.error as e:
            raise ImproperlyConfigured(e)
        else:
            self.monitor_sockets(monitor, server.sockets)

    def monitor_sockets(self, monitor, sockets):
        addresses = []
        loop = monitor._loop
        for sock in sockets:
            addresses.append(sock.getsockname())
            fd = sock.fileno()
            loop.remove_reader(fd)
        monitor.sockets = sockets
        self.cfg.addresses = addresses

    def actorparams(self, monitor, params):
        params['sockets'] = monitor.sockets

    async def worker_start(self, worker, exc=None):
        '''Start the worker by invoking the :meth:`create_server` method.
        '''
        if not exc:
            server = await self.create_server(worker)
            server.bind_event('stop', lambda _, **kw: worker.stop())
            worker.servers[self.name] = server

    async def worker_stopping(self, worker, exc=None):
        server = worker.servers.get(self.name)
        if server:
            await server.close()
        close = getattr(self.cfg.callable, 'close', None)
        if hasattr(close, '__call__'):
            try:
                await as_coroutine(close())
            except Exception:
                pass

    def worker_info(self, worker, info):
        server = worker.servers.get(self.name)
        if server:
            info['%sserver' % self.name] = server.info()
        return info

    def server_factory(self, *args, **kw):
        '''Create a :class:`.TcpServer`.
        '''
        return TcpServer(*args, **kw)

    #   INTERNALS
    async def create_server(self, worker):
        '''Create the Server which will listen for requests.

        :return: a :class:`.TcpServer`.
        '''
        sockets = worker.sockets
        cfg = self.cfg
        max_requests = cfg.max_requests
        if max_requests:
            max_requests = int(lognormvariate(log(max_requests), 0.2))
        server = self.server_factory(self.protocol_factory(),
                                     worker._loop,
                                     sockets=sockets,
                                     max_requests=max_requests,
                                     keep_alive=cfg.keep_alive,
                                     name=self.name,
                                     logger=self.logger)
        for event in ('connection_made', 'pre_request', 'post_request',
                      'connection_lost'):
            callback = getattr(cfg, event)
            if callback != pass_through:
                server.bind_event(event, callback)
        await server.start_serving(cfg.backlog, sslcontext=self.sslcontext())
        return server

    def sslcontext(self):
        cfg = self.cfg
        if cfg.cert_file and cfg.key_file:
            ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
            ctx.load_cert_chain(certfile=cfg.cert_file, keyfile=cfg.key_file)
            return ctx
Example #28
class TestSuite(pulsar.Application):
    '''An asynchronous test suite which works like a task queue.

    Each task is a group of test methods in a python TestCase class.

    :parameter modules: An iterable over modules where to look for tests.
        If not provided it defaults to ``["tests"]``, which loads all
        python modules from the ``tests`` module in a recursive fashion.
        Check the :class:`.TestLoader` for detailed information.
    :parameter plugins: Optional list of dotted path to
        :class:`.TestPlugin` classes.
    '''
    name = 'test'
    cfg = pulsar.Config(description='pulsar test suite',
                        apps=['test'],
                        log_level=['none'])

    @lazyproperty
    def loader(self):
        """Instance of the :class:`.TestLoader` used for loading test cases
        """
        return TestLoader(self)

    def on_config(self, arbiter):
        loader = self.loader
        stream = loader.stream
        if loader.abort_message:
            stream.writeln(str(loader.abort_message))
            return False

        stream.writeln(sys.version)
        if self.cfg.list_labels:  # pragma    nocover
            tags = self.cfg.labels
            if tags:
                s = '' if len(tags) == 1 else 's'
                stream.writeln('\nTest labels for%s %s:' %
                               (s, ', '.join(tags)))
            else:
                stream.writeln('\nAll test labels:')
            stream.writeln('')
            for tag in loader.tags(tags, self.cfg.exclude_labels):
                stream.writeln(tag)
            stream.writeln('')
            return False

        elif self.cfg.coveralls:  # pragma nocover
            from pulsar.apps.test.cov import coveralls
            coveralls()
            return False

    def monitor_start(self, monitor):
        '''When the monitor starts load all test classes into the queue'''
        self.cfg.set('workers', 0)

        if self.cfg.callable:
            self.cfg.callable()

        monitor._loop.call_soon(Runner, monitor, self)

    @classmethod
    def create_config(cls, *args, **kwargs):
        cfg = super().create_config(*args, **kwargs)
        for plugin in cfg.test_plugins:
            cfg.settings.update(plugin.config.settings)
        return cfg

    def arbiter_params(self):
        params = super().arbiter_params()
        params['concurrency'] = self.cfg.concurrency
        return params
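A minimal sketch of a runtests.py entry point built on this suite; the description and modules values below are illustrative.

if __name__ == '__main__':
    TestSuite(description='my project test suite',
              modules=('tests',)).start()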
Example #29
class TaskQueue(pulsar.Application):
    '''A pulsar :class:`.Application` for consuming :class:`.Task`.

    This application can also schedule periodic tasks when the
    :ref:`schedule_periodic <setting-schedule_periodic>` flag is ``True``.
    '''
    backend = None
    '''The :class:`.TaskBackend` for this task queue.

    Available once the :class:`.TaskQueue` has started.
    '''
    name = 'tasks'
    cfg = pulsar.Config(apps=('tasks', ), timeout=600)

    @task
    def monitor_start(self, monitor):
        '''Starts running the task queue in ``monitor``.

        It calls the :attr:`.Application.callable` (if available)
        and creates the :attr:`~.TaskQueue.backend`.
        '''
        if self.cfg.callable:
            self.cfg.callable()
        connection_string = (self.cfg.task_backend or self.cfg.data_store
                             or DEFAULT_TASK_BACKEND)
        store = yield start_store(connection_string, loop=monitor._loop)
        self.get_backend(store)

    def monitor_task(self, monitor):
        '''Override the :meth:`~.Application.monitor_task` callback.

        Check if the :attr:`~.TaskQueue.backend` needs to schedule new tasks.
        '''
        if self.backend and monitor.is_running():
            if self.backend.next_run <= time.time():
                self.backend.tick()

    def monitor_stopping(self, monitor, exc=None):
        if self.backend:
            self.backend.close()

    def worker_start(self, worker, exc=None):
        if not exc:
            self.get_backend().start(worker)

    def worker_stopping(self, worker, exc=None):
        if self.backend:
            return self.backend.close()

    def actorparams(self, monitor, params):
        # makes sure workers are only consuming tasks, not scheduling.
        cfg = params['cfg']
        cfg.set('schedule_periodic', False)

    def worker_info(self, worker, info=None):
        be = self.backend
        if be:
            tasks = {
                'concurrent': list(be.concurrent_tasks),
                'processed': be.processed
            }
            info['tasks'] = tasks

    def get_backend(self, store=None):
        if self.backend is None:
            if store is None:
                store = create_store(self.cfg.task_backend)
            else:
                self.cfg.set('task_backend', store.dns)
            task_backend = task_backends.get(store.name)
            if not task_backend:
                raise pulsar.ImproperlyConfigured(
                    'Task backend for %s not available' % store.name)
            self.backend = task_backend(
                store,
                logger=self.logger,
                name=self.name,
                task_paths=self.cfg.task_paths,
                schedule_periodic=self.cfg.schedule_periodic,
                max_tasks=self.cfg.max_requests,
                backlog=self.cfg.concurrent_tasks)
            self.logger.debug('created %s', self.backend)
        return self.backend
Example #30
class Goblin(pulsar.Application):
    cfg = pulsar.Config(workers=2)

    def monitor_start(self, monitor):
        """Setup message queues"""
        # This lives in the monitor context
        # Queue incoming messages from rpc service
        self.incoming_queue = asyncio.Queue(maxsize=250)
        # These queues hold response data that can be asynchronously read
        # by the rpc service
        self.response_queues = {}

    @asyncio.coroutine
    def add_task(self, request_id, method, blob):
        """Adzd a task to the incoming task queue"""
        self.response_queues[request_id] = asyncio.Queue()
        yield from self.incoming_queue.put((request_id, method, blob))

    @asyncio.coroutine
    def read_response(self, request_id):
        """This method allows the rpc service to read from the response queues
           maintained by the app."""
        try:
            queue = self.response_queues[request_id]
        except KeyError:
            raise KeyError("Bad request id")
        else:
            resp = yield from queue.get()
            if resp is None:
                del self.response_queues[request_id]
            return resp

    def worker_start(self, worker, exc=None):
        """Setup the global goblin variables, then start asking the monitor
           for tasks..."""
        worker.pool = aiohttp_client.Pool("ws://localhost:8182",
                                          future_class=asyncio.Future,
                                          loop=worker._loop,
                                          force_release=True)
        # check the queue periodically for tasks...
        worker._loop.call_soon(self.start_working, worker)

    def worker_stopping(self, worker, exc=None):
        """Close the connection pool for this process"""
        worker._loop.call_soon(pulsar.ensure_future, worker.pool.close())

    def start_working(self, worker):
        """Don't be lazy"""
        pulsar.ensure_future(self.run(worker))

    @asyncio.coroutine
    def run(self, worker):
        """Try to get tasks from the monitor. If tasks are available process
           using same worker, if not, wait a second, and ask again...
           BE PERSISTENT!"""
        request_id, method, blob = yield from worker.send(
            worker.monitor, 'get_task')
        if request_id and method and blob:
            yield from pulsar.send(
                worker.aid, 'process_task', request_id, method, blob)
        worker._loop.call_later(1, self.start_working, worker)

    def get_task(self):
        """Check for tasks, if available, pass data to calling worker for
           processing..."""
        try:
            request_id, method, blob = self.incoming_queue.get_nowait()
        except asyncio.QueueEmpty:
            LOGGER.debug("No tasks available :( :( :(")
            return None, None, None
        else:
            return request_id, method, blob