Example #1
    async def test_buggy_listener(self, loop, console, results):
        listener = MyBuggyListener()
        eventer = EventSender(console)
        eventer.add_listener(listener)
        await eventer.send_event("my_event")
        resp = await serialize(console)
        self.assertTrue("Bam" in resp)
Example #2
    async def test_buggy_listener(self, loop, console, results):
        class MyListener(BaseListener):
            def on_my_event(self, **options):
                raise Exception("Bam")

        listener = MyListener()
        eventer = EventSender(console)
        eventer.add_listener(listener)
        await eventer.send_event("my_event")
        resp = await serialize(console)
        self.assertTrue("Bam" in resp)
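
The two tests above rely on the EventSender/BaseListener contract: listeners expose on_<event> methods, add_listener() registers them, and send_event() dispatches keyword options to every matching handler while reporting, rather than propagating, listener exceptions. Below is a self-contained toy sketch of that dispatch pattern; it reuses the class names for readability, but it is an illustration only, not molotov's actual implementation.

# Toy sketch of the listener dispatch pattern exercised by the tests above.
# Illustration only -- not molotov's implementation; the real EventSender
# reports listener errors on the shared console rather than printing them.
import asyncio


class BaseListener:
    pass


class EventSender:
    def __init__(self, console, listeners=None):
        self.console = console
        self._listeners = list(listeners or [])

    def add_listener(self, listener):
        self._listeners.append(listener)

    async def send_event(self, event, **options):
        for listener in self._listeners:
            handler = getattr(listener, "on_" + event, None)
            if handler is None:
                continue
            try:
                handler(**options)
            except Exception as e:
                # a buggy listener must not break the run (see the "Bam" test)
                print("listener error:", e)


class MyListener(BaseListener):
    def on_my_event(self, **options):
        print("my_event fired with value", options["value"])


async def main():
    eventer = EventSender(console=None)
    eventer.add_listener(MyListener())
    await eventer.send_event("my_event", value=42)


asyncio.run(main())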
Example #3
    def __init__(self, args, loop=None):
        self.args = args
        self.console = self.args.shared_console
        if loop is None:
            loop = asyncio.get_event_loop()
        self.loop = loop
        # the statsd client gets initialized after we fork
        # processes in case -p was used
        self.statsd = None
        self._tasks = []
        self._procs = []
        self._results = SharedCounters('WORKER', 'REACHED', 'RATIO', 'OK',
                                       'FAILED', 'MINUTE_OK', 'MINUTE_FAILED')
        self.eventer = EventSender(self.console)
Example #4
    def __init__(self, loop, console, verbose, statsd):
        super().__init__(
            trace_config_ctx_factory=self._trace_config_ctx_factory)
        self.loop = loop
        self.console = console
        self.verbose = verbose
        self.eventer = EventSender(
            console,
            [
                StdoutListener(
                    verbose=self.verbose, console=self.console, loop=self.loop)
            ],
        )
        self.on_request_start.append(self._request_start)
        self.on_request_end.append(self._request_end)
        self.context = namedtuple("context", ["statsd"])
        self.context.statsd = statsd
Example #5
    async def test_add_listener(self, loop, console, results):
        class MyListener(BaseListener):
            def __init__(self):
                self.fired = False
                self.value = None

            def on_my_event(self, **options):
                self.fired = True
                self.value = options["value"]

        listener = MyListener()
        eventer = EventSender(console)
        eventer.add_listener(listener)
        await eventer.send_event("my_event", value=42)
        await serialize(console)

        self.assertTrue(listener.fired)
        self.assertEqual(listener.value, 42)
Example #6
class SessionTracer(TraceConfig):
    def __init__(self, loop, console, verbose, statsd):
        super().__init__(
            trace_config_ctx_factory=self._trace_config_ctx_factory)
        self.loop = loop
        self.console = console
        self.verbose = verbose
        self.eventer = EventSender(
            console,
            [
                StdoutListener(
                    verbose=self.verbose, console=self.console, loop=self.loop)
            ],
        )
        self.on_request_start.append(self._request_start)
        self.on_request_end.append(self._request_end)
        self.context = namedtuple("context", ["statsd"])
        self.context.statsd = statsd

    def _trace_config_ctx_factory(self, trace_request_ctx):
        return SimpleNamespace(trace_request_ctx=trace_request_ctx,
                               context=self.context)

    def add_listener(self, listener):
        return self.eventer.add_listener(listener)

    async def send_event(self, event, **options):
        await self.eventer.send_event(event, session=self, **options)

    async def _request_start(self, session, trace_config_ctx, params):
        if self.context.statsd:
            prefix = "molotov.%(hostname)s.%(method)s.%(host)s.%(path)s"
            data = {
                "method": params.method,
                "hostname": _HOST,
                "host": params.url.host,
                "path": params.url.path,
            }
            label = prefix % data
            trace_config_ctx.start = perf_counter()
            trace_config_ctx.label = label
            trace_config_ctx.data = data

    async def _request_end(self, session, trace_config_ctx, params):
        if self.context.statsd:
            duration = int((perf_counter() - trace_config_ctx.start) * 1000)
            self.context.statsd.timing(trace_config_ctx.label, value=duration)
            self.context.statsd.increment(trace_config_ctx.label + "." +
                                          str(params.response.status))
        await self.send_event(
            "response_received",
            response=params.response,
            request=params.response.request,
        )
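
When statsd is enabled, _request_start above builds the metric label with plain %-interpolation over a dict, and _request_end converts the elapsed perf_counter() time to integer milliseconds. A quick worked example with made-up values (the hostname, URL, and timings below are hypothetical):

# Worked example of the label built in _request_start (values are made up).
prefix = "molotov.%(hostname)s.%(method)s.%(host)s.%(path)s"
data = {
    "method": "GET",
    "hostname": "loadgen-1",      # stands in for _HOST
    "host": "example.com",
    "path": "/api/v1/items",
}
print(prefix % data)
# -> molotov.loadgen-1.GET.example.com./api/v1/items

# Duration math from _request_end: elapsed seconds -> integer milliseconds.
start, now = 0.0, 0.5             # hypothetical perf_counter() readings
print(int((now - start) * 1000))  # -> 500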
Example #7
    def __init__(self, args, loop=None):
        self.args = args
        self.console = self.args.shared_console
        if loop is None:
            loop = asyncio.get_event_loop()
        self.loop = loop
        # the statsd client gets initialized after we fork
        # processes in case -p was used
        self.statsd = None
        self._tasks = []
        self._procs = []
        self._results = SharedCounters(
            "WORKER",
            "REACHED",
            "RATIO",
            "OK",
            "FAILED",
            "MINUTE_OK",
            "MINUTE_FAILED",
            "MAX_WORKERS",
            "SETUP_FAILED",
            "SESSION_SETUP_FAILED",
        )
        self.eventer = EventSender(self.console)
Example #8
    def __init__(self,
                 wid,
                 results,
                 console,
                 args,
                 statsd=None,
                 delay=0,
                 loop=None):
        self.wid = wid
        self.results = results
        self.console = console
        self.loop = loop or asyncio.get_event_loop()
        self.args = args
        self.statsd = statsd
        self.delay = delay
        self.eventer = EventSender(console)
Example #9
    def __init__(self, wid, results, console, args, statsd=None,
                 delay=0, loop=None):
        self.wid = wid
        self.results = results
        self.console = console
        self.loop = loop or asyncio.get_event_loop()
        self.args = args
        self.statsd = statsd
        self.delay = delay
        self.count = 0
        self.worker_start = 0
        self.eventer = EventSender(console)
        # fixtures
        self._session_setup = get_fixture('setup_session')
        self._session_teardown = get_fixture('teardown_session')
        self._setup = get_fixture('setup')
        self._teardown = get_fixture('teardown')
Example #10
    def __init__(self, loop, console, verbose=0, statsd=None, **kw):
        connector = kw.pop('connector', None)
        if connector is None:
            connector = TCPConnector(loop=loop, limit=None)
        super(LoggedClientSession,
              self).__init__(loop=loop,
                             request_class=LoggedClientRequest,
                             response_class=LoggedClientResponse,
                             connector=connector,
                             **kw)
        self.console = console
        self.request_class = LoggedClientRequest
        self.request_class.verbose = verbose
        self.verbose = verbose
        self.request_class.session = self
        self.request_class.response_class = LoggedClientResponse
        self.statsd = statsd
        self.eventer = EventSender(
            console,
            [StdoutListener(verbose=self.verbose, console=self.console)])
Example #11
class Runner(object):
    """Manages processes & workers and grabs results.
    """

    def __init__(self, args, loop=None):
        self.args = args
        self.console = self.args.shared_console
        if loop is None:
            loop = asyncio.get_event_loop()
        self.loop = loop
        # the statsd client gets initialized after we fork
        # processes in case -p was used
        self.statsd = None
        self._tasks = []
        self._procs = []
        self._results = SharedCounters(
            "WORKER", "REACHED", "RATIO", "OK", "FAILED", "MINUTE_OK", "MINUTE_FAILED"
        )
        self.eventer = EventSender(self.console)

    def _set_statsd(self):
        if self.args.statsd:
            self.statsd = get_statsd_client(self.args.statsd_address)
        else:
            self.statsd = None

    def run_coro(self, coro):
        if not self.loop.is_running():
            raise Exception("Loop is not running")
        future = asyncio.run_coroutine_threadsafe(coro, self.loop)
        return future.result()

    def gather(self, *futures):
        return asyncio.gather(*futures, loop=self.loop, return_exceptions=True)

    def ensure_future(self, coro):
        return asyncio.ensure_future(coro, loop=self.loop)

    def __call__(self):
        global_setup = get_fixture("global_setup")
        if global_setup is not None:
            try:
                global_setup(self.args)
            except Exception as e:
                self.console.print("The global_setup() fixture failed")
                self.console.print_error(e)
                raise

        try:
            return self._launch_processes()
        finally:
            global_teardown = get_fixture("global_teardown")
            if global_teardown is not None:
                try:
                    global_teardown()
                except Exception as e:
                    # we can't stop the teardown process
                    self.console.print_error(e)

    def _launch_processes(self):
        args = self.args
        signal.signal(signal.SIGINT, self._shutdown)
        signal.signal(signal.SIGTERM, self._shutdown)
        args.original_pid = os.getpid()

        if args.processes > 1:
            if not args.quiet:
                self.console.print("Forking %d processes" % args.processes)
            jobs = []
            for i in range(args.processes):
                p = multiprocessing.Process(target=self._process)
                jobs.append(p)
                p.start()

            for job in jobs:
                self._procs.append(job)

            async def run(quiet, console):
                while len(self._procs) > 0:
                    if not quiet:
                        console.print(self.display_results(), end="\r")
                    for job in jobs:
                        if job.exitcode is not None and job in self._procs:
                            self._procs.remove(job)
                    await cancellable_sleep(args.console_update)
                await self.console.stop()
                await self.eventer.stop()

            tasks = [
                self.ensure_future(self.console.display()),
                self.ensure_future(self._send_workers_event(1)),
                self.ensure_future(run(args.quiet, self.console)),
            ]
            self.loop.run_until_complete(self.gather(*tasks))
        else:
            self._process()

        return self._results

    def _shutdown(self, signal, frame):
        stop()
        self._kill_tasks()
        # send sigterms
        for proc in self._procs:
            proc.terminate()

    def _runner(self):
        args = self.args

        def _prepare():
            tasks = []
            delay = 0
            if args.ramp_up > 0.0:
                step = args.ramp_up / args.workers
            else:
                step = 0.0
            for i in range(self.args.workers):
                worker = Worker(
                    i,
                    self._results,
                    self.console,
                    self.args,
                    self.statsd,
                    delay,
                    self.loop,
                )
                f = self.ensure_future(worker.run())
                tasks.append(f)
                delay += step
            return tasks

        if self.args.quiet:
            return _prepare()
        else:
            msg = "Preparing {} worker{}"
            msg = msg.format(args.workers, "s" if args.workers > 1 else "")
            return self.console.print_block(msg, _prepare)

    def _process(self):
        set_timer()

        # coroutine that will kill everything when duration is up
        if self.args.duration and self.args.force_shutdown:

            async def _duration_killer():
                cancelled = object()
                res = await cancellable_sleep(self.args.duration, result=cancelled)
                if res is cancelled or (res and not res.canceled()):
                    self._shutdown(None, None)
                    await asyncio.sleep(0)

            _duration_killer = self.ensure_future(_duration_killer())
        else:
            _duration_killer = None

        if self.args.processes > 1:
            signal.signal(signal.SIGINT, self._shutdown)
            signal.signal(signal.SIGTERM, self._shutdown)
            self.loop = asyncio.new_event_loop()
            asyncio.set_event_loop(self.loop)

        if self.args.debug:
            self.console.print("**** RUNNING IN DEBUG MODE == SLOW ****")
            self.loop.set_debug(True)

        self._set_statsd()
        if self.statsd is not None:
            self._tasks.append(self.ensure_future(self.statsd.connect()))

        if self.args.original_pid == os.getpid():
            self._tasks.append(self.ensure_future(self._send_workers_event(1)))
            if not self.args.quiet:
                fut = self._display_results(self.args.console_update)
                update = self.ensure_future(fut)
                display = self.ensure_future(self.console.display())
                display = self.gather(update, display)
                self._tasks.append(display)

        workers = self.gather(*self._runner())

        def _stop(cb):
            if _duration_killer is not None:
                if not _duration_killer.done():
                    _duration_killer.cancel()
            stop()

        workers.add_done_callback(_stop)
        self._tasks.append(workers)

        try:
            self.loop.run_until_complete(self.gather(*self._tasks))
        finally:
            if self.statsd is not None:
                self.loop.run_until_complete(self.ensure_future(self.statsd.close()))
            self._kill_tasks()
            self.loop.close()

    def _kill_tasks(self):
        cancellable_sleep.cancel_all()
        for task in reversed(self._tasks):
            with suppress(asyncio.CancelledError):
                task.cancel()
        for task in self._tasks:
            del task
        self._tasks[:] = []

    def display_results(self):
        ok, fail = self._results["OK"].value, self._results["FAILED"].value
        workers = self._results["WORKER"].value
        pat = "SUCCESSES: %s | FAILURES: %s | WORKERS: %s"
        return pat % (ok, fail, workers)

    async def _display_results(self, update_interval):
        while not is_stopped():
            self.console.print(self.display_results(), end="\r")
            await cancellable_sleep(update_interval)
        await self.console.stop()

    async def _send_workers_event(self, update_interval):
        while not self.eventer.stopped() and not is_stopped():
            workers = self._results["WORKER"].value
            await self.eventer.send_event("current_workers", workers=workers)
            await cancellable_sleep(update_interval)
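
The _prepare() closure in _runner above spaces worker start times evenly across the ramp-up window: step = ramp_up / workers, and worker i starts after a delay of i * step seconds. A tiny worked example with hypothetical settings:

# Ramp-up schedule from _runner._prepare(): with a 10-second ramp-up and
# 5 workers, one worker starts every 2 seconds.
ramp_up, workers = 10.0, 5
step = ramp_up / workers if ramp_up > 0.0 else 0.0
delays = [i * step for i in range(workers)]
print(delays)  # -> [0.0, 2.0, 4.0, 6.0, 8.0]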
Example #12
class Runner(object):
    """Manages processes & workers and grabs results.
    """
    def __init__(self, args, loop=None):
        self.args = args
        self.console = self.args.shared_console
        if loop is None:
            loop = asyncio.get_event_loop()
        self.loop = loop
        # the statsd client gets initialized after we fork
        # processes in case -p was used
        self.statsd = None
        self._tasks = []
        self._procs = []
        self._results = SharedCounters('WORKER', 'REACHED', 'RATIO', 'OK',
                                       'FAILED', 'MINUTE_OK', 'MINUTE_FAILED')
        self.eventer = EventSender(self.console)

    def _set_statsd(self):
        if self.args.statsd:
            self.statsd = get_statsd_client(self.args.statsd_address,
                                            loop=self.loop)
        else:
            self.statsd = None

    def gather(self, *futures):
        return asyncio.gather(*futures, loop=self.loop, return_exceptions=True)

    def ensure_future(self, coro):
        return asyncio.ensure_future(coro, loop=self.loop)

    def __call__(self):
        global_setup = get_fixture('global_setup')
        if global_setup is not None:
            try:
                global_setup(self.args)
            except Exception as e:
                self.console.print("The global_setup() fixture failed")
                self.console.print_error(e)
                raise

        try:
            return self._launch_processes()
        finally:
            global_teardown = get_fixture('global_teardown')
            if global_teardown is not None:
                try:
                    global_teardown()
                except Exception as e:
                    # we can't stop the teardown process
                    self.console.print_error(e)

    def _launch_processes(self):
        args = self.args
        signal.signal(signal.SIGINT, self._shutdown)
        signal.signal(signal.SIGTERM, self._shutdown)
        args.original_pid = os.getpid()

        if args.processes > 1:
            if not args.quiet:
                self.console.print('Forking %d processes' % args.processes)
            jobs = []
            for i in range(args.processes):
                p = multiprocessing.Process(target=self._process)
                jobs.append(p)
                p.start()

            for job in jobs:
                self._procs.append(job)

            async def run(quiet, console):
                while len(self._procs) > 0:
                    if not quiet:
                        console.print(self.display_results(), end='\r')
                    for job in jobs:
                        if job.exitcode is not None and job in self._procs:
                            self._procs.remove(job)
                    await cancellable_sleep(args.console_update)
                await self.console.stop()
                await self.eventer.stop()

            tasks = [self.ensure_future(self.console.display()),
                     self.ensure_future(self._send_workers_event(1)),
                     self.ensure_future(run(args.quiet, self.console))]
            self.loop.run_until_complete(self.gather(*tasks))
        else:
            self._process()

        return self._results

    def _shutdown(self, signal, frame):
        stop()
        self._kill_tasks()
        # send sigterms
        for proc in self._procs:
            proc.terminate()

    def _runner(self):
        args = self.args

        def _prepare():
            tasks = []
            delay = 0
            if args.ramp_up > 0.:
                step = args.ramp_up / args.workers
            else:
                step = 0.
            for i in range(self.args.workers):
                worker = Worker(i, self._results, self.console, self.args,
                                self.statsd, delay, self.loop)
                f = self.ensure_future(worker.run())
                tasks.append(f)
                delay += step
            return tasks

        if self.args.quiet:
            return _prepare()
        else:
            msg = 'Preparing {} worker{}'
            msg = msg.format(args.workers, 's' if args.workers > 1 else '')
            return self.console.print_block(msg, _prepare)

    def _process(self):
        set_timer()
        if self.args.processes > 1:
            signal.signal(signal.SIGINT, self._shutdown)
            signal.signal(signal.SIGTERM, self._shutdown)
            self.loop = asyncio.new_event_loop()
            asyncio.set_event_loop(self.loop)

        if self.args.debug:
            self.console.print('**** RUNNING IN DEBUG MODE == SLOW ****')
            self.loop.set_debug(True)

        self._set_statsd()

        if self.args.original_pid == os.getpid():
            self._tasks.append(self.ensure_future(self._send_workers_event(1)))
            if not self.args.quiet:
                fut = self._display_results(self.args.console_update)
                update = self.ensure_future(fut)
                display = self.ensure_future(self.console.display())
                display = self.gather(update, display)
                self._tasks.append(display)

        workers = self.gather(*self._runner())
        workers.add_done_callback(lambda fut: stop())
        self._tasks.append(workers)

        try:
            self.loop.run_until_complete(self.gather(*self._tasks))
        finally:
            self._kill_tasks()
            if self.statsd is not None:
                self.statsd.close()
            self.loop.close()

    def _kill_tasks(self):
        cancellable_sleep.cancel_all()
        for task in reversed(self._tasks):
            with suppress(asyncio.CancelledError):
                task.cancel()
        for task in self._tasks:
            del task
        self._tasks[:] = []

    def display_results(self):
        ok, fail = self._results['OK'].value, self._results['FAILED'].value
        workers = self._results['WORKER'].value
        pat = 'SUCCESSES: %s | FAILURES: %s | WORKERS: %s'
        return pat % (ok, fail, workers)

    async def _display_results(self, update_interval):
        while not is_stopped():
            self.console.print(self.display_results(), end='\r')
            await cancellable_sleep(update_interval)
        await self.console.stop()

    async def _send_workers_event(self, update_interval):
        while not self.eventer.stopped() and not is_stopped():
            workers = self._results['WORKER'].value
            await self.eventer.send_event('current_workers', workers=workers)
            await cancellable_sleep(update_interval)
Example #13
class Runner(object):
    """Manages processes & workers and grabs results.
    """
    def __init__(self, args, loop=None):
        self.args = args
        self.console = self.args.shared_console
        if loop is None:
            loop = asyncio.get_event_loop()
        self.loop = loop
        # the statsd client gets initialized after we fork
        # processes in case -p was used
        self.statsd = None
        self._tasks = []
        self._procs = []
        self._results = SharedCounters('WORKER', 'REACHED', 'RATIO', 'OK',
                                       'FAILED', 'MINUTE_OK', 'MINUTE_FAILED')
        self.eventer = EventSender(self.console)

    def _set_statsd(self):
        if self.args.statsd:
            self.statsd = get_statsd_client(self.args.statsd_address,
                                            loop=self.loop)
        else:
            self.statsd = None

    def gather(self, *futures):
        return asyncio.gather(*futures, loop=self.loop, return_exceptions=True)

    def ensure_future(self, coro):
        return asyncio.ensure_future(coro, loop=self.loop)

    def __call__(self):
        global_setup = get_fixture('global_setup')
        if global_setup is not None:
            try:
                global_setup(self.args)
            except Exception as e:
                self.console.print("The global_setup() fixture failed")
                self.console.print_error(e)
                raise

        try:
            return self._launch_processes()
        finally:
            global_teardown = get_fixture('global_teardown')
            if global_teardown is not None:
                try:
                    global_teardown()
                except Exception as e:
                    # we can't stop the teardown process
                    self.console.print_error(e)

    def _launch_processes(self):
        args = self.args
        signal.signal(signal.SIGINT, self._shutdown)
        signal.signal(signal.SIGTERM, self._shutdown)
        args.original_pid = os.getpid()

        if args.processes > 1:
            if not args.quiet:
                self.console.print('Forking %d processes' % args.processes)
            jobs = []
            for i in range(args.processes):
                p = multiprocessing.Process(target=self._process)
                jobs.append(p)
                p.start()

            for job in jobs:
                self._procs.append(job)

            async def run(quiet, console):
                while len(self._procs) > 0:
                    if not quiet:
                        console.print(self.display_results(), end='\r')
                    for job in jobs:
                        if job.exitcode is not None and job in self._procs:
                            self._procs.remove(job)
                    await cancellable_sleep(args.console_update)
                await self.console.stop()
                await self.eventer.stop()

            tasks = [self.ensure_future(self.console.display()),
                     self.ensure_future(self._send_workers_event(1)),
                     self.ensure_future(run(args.quiet, self.console))]
            self.loop.run_until_complete(self.gather(*tasks))
        else:
            self._process()

        return self._results

    def _shutdown(self, signal, frame):
        stop()
        self._kill_tasks()
        # send sigterms
        for proc in self._procs:
            proc.terminate()

    def _runner(self):
        args = self.args

        def _prepare():
            tasks = []
            delay = 0
            if args.ramp_up > 0.:
                step = args.ramp_up / args.workers
            else:
                step = 0.
            for i in range(self.args.workers):
                worker = Worker(i, self._results, self.console, self.args,
                                self.statsd, delay, self.loop)
                f = self.ensure_future(worker.run())
                tasks.append(f)
                delay += step
            return tasks

        if self.args.quiet:
            return _prepare()
        else:
            msg = 'Preparing {} worker{}'
            msg = msg.format(args.workers, 's' if args.workers > 1 else '')
            return self.console.print_block(msg, _prepare)

    def _process(self):
        set_timer()

        # coroutine that will kill everything when duration is up
        if self.args.duration and self.args.force_shutdown:
            async def _duration_killer():
                cancelled = object()
                res = await cancellable_sleep(self.args.duration,
                                              result=cancelled)
                if res is cancelled or (res and not res.canceled()):
                    self._shutdown(None, None)
                    await asyncio.sleep(0)

            _duration_killer = self.ensure_future(_duration_killer())
        else:
            _duration_killer = None

        if self.args.processes > 1:
            signal.signal(signal.SIGINT, self._shutdown)
            signal.signal(signal.SIGTERM, self._shutdown)
            self.loop = asyncio.new_event_loop()
            asyncio.set_event_loop(self.loop)

        if self.args.debug:
            self.console.print('**** RUNNING IN DEBUG MODE == SLOW ****')
            self.loop.set_debug(True)

        self._set_statsd()

        if self.args.original_pid == os.getpid():
            self._tasks.append(self.ensure_future(self._send_workers_event(1)))
            if not self.args.quiet:
                fut = self._display_results(self.args.console_update)
                update = self.ensure_future(fut)
                display = self.ensure_future(self.console.display())
                display = self.gather(update, display)
                self._tasks.append(display)

        workers = self.gather(*self._runner())

        def _stop(cb):
            if _duration_killer is not None:
                if not _duration_killer.done():
                    _duration_killer.cancel()
            stop()

        workers.add_done_callback(_stop)
        self._tasks.append(workers)

        try:
            self.loop.run_until_complete(self.gather(*self._tasks))
        finally:
            self._kill_tasks()
            if self.statsd is not None:
                self.statsd.close()
            self.loop.run_until_complete(self.ensure_future(asyncio.sleep(0)))
            self.loop.close()

    def _kill_tasks(self):
        cancellable_sleep.cancel_all()
        for task in reversed(self._tasks):
            with suppress(asyncio.CancelledError):
                task.cancel()
        for task in self._tasks:
            del task
        self._tasks[:] = []

    def display_results(self):
        ok, fail = self._results['OK'].value, self._results['FAILED'].value
        workers = self._results['WORKER'].value
        pat = 'SUCCESSES: %s | FAILURES: %s | WORKERS: %s'
        return pat % (ok, fail, workers)

    async def _display_results(self, update_interval):
        while not is_stopped():
            self.console.print(self.display_results(), end='\r')
            await cancellable_sleep(update_interval)
        await self.console.stop()

    async def _send_workers_event(self, update_interval):
        while not self.eventer.stopped() and not is_stopped():
            workers = self._results['WORKER'].value
            await self.eventer.send_event('current_workers', workers=workers)
            await cancellable_sleep(update_interval)
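
Shutdown in these Runner examples hinges on cancellable_sleep: every waiting loop parks on it, and _kill_tasks() (reached from the signal handlers) calls cancellable_sleep.cancel_all() so the display and event loops unwind promptly. The sketch below illustrates that pattern in isolation; it is an assumption-level re-creation of the idea, not molotov's cancellable_sleep.

# Illustration of a mass-cancellable sleep (not molotov's implementation).
import asyncio

_sleeps = set()


async def cancellable_sleep(delay, result=None):
    task = asyncio.ensure_future(asyncio.sleep(delay, result=result))
    _sleeps.add(task)
    try:
        return await task
    except asyncio.CancelledError:
        return result              # a cancelled sleep just returns early
    finally:
        _sleeps.discard(task)


def cancel_all():
    for task in list(_sleeps):
        task.cancel()


cancellable_sleep.cancel_all = cancel_all  # attribute-style call, as used above


async def demo():
    waiter = asyncio.ensure_future(cancellable_sleep(60, result="slept"))
    await asyncio.sleep(0.1)
    cancellable_sleep.cancel_all()  # unblocks the 60-second sleep immediately
    print(await waiter)             # -> slept


asyncio.run(demo())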