class DistributedFunctionalTest(TestCase):
    """Functional tests driving a full distributed run (broker + agents).

    Requires the local test cluster started by start_servers().
    """

    def setUp(self):
        # Boots broker/agents/echo server; start_servers() is idempotent.
        start_servers()
        self.client = Client()

    @skipIf('TRAVIS' in os.environ, 'Travis')
    def test_distributed_run(self):
        # Launch a short distributed run and check results were collected.
        start_runner(get_runner_args(
            fqn='loads.examples.test_blog.TestWebSite.test_something',
            agents=2,
            output=['null'],
            users=1, cycles=10))
        runs = self.client.list_runs()
        # NOTE(review): runs.keys()[0] is Python 2 only; on Python 3 this
        # would need list(runs.keys())[0].
        data = self.client.get_data(runs.keys()[0])
        self.assertTrue(len(data) > 100)

    @skipIf('TRAVIS' in os.environ, 'Travis')
    def test_distributed_run_duration(self):
        args = get_runner_args(
            fqn='loads.examples.test_blog.TestWebSite.test_something',
            agents=1,
            #output=['null'],
            users=10,
            duration=1)
        start_runner(args)
        # give the agents a moment to flush their results
        time.sleep(1.)
        runs = self.client.list_runs()
        try:
            data = self.client.get_data(runs.keys()[0])
        except Exception:
            # best-effort retry: the broker may not have the data yet
            data = self.client.get_data(runs.keys()[0])
        self.assertTrue(len(data) > 10)
def main(sysargs=None):
    """Command-line entry point.

    Parses the arguments, handles the broker maintenance flags
    (--ping-broker / --purge-broker / --check-cluster), then delegates
    the actual load test to run().  Returns run()'s result.
    """
    # parsing the command line
    args, parser = _parse(sysargs)

    # loggers setting
    wslogger = logging.getLogger('ws4py')
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    wslogger.addHandler(ch)
    set_logger()

    if args.version:
        print(__version__)
        sys.exit(0)

    if args.ping_broker or args.purge_broker or args.check_cluster:
        client = Client(args.broker)
        ping = client.ping()

        if args.purge_broker:
            runs = client.purge_broker()
            if len(runs) == 0:
                print('Nothing to purge.')
            else:
                print('We have %d run(s) right now:' % len(runs))
                print('Purged.')
            sys.exit(0)
        elif args.ping_broker:
            print('Broker running on pid %d' % ping['pid'])
            print('%d agents registered' % len(ping['agents']))
            print('endpoints:')
            for name, location in ping['endpoints'].items():
                print(' - %s: %s' % (name, location))
            runs = client.list_runs()
            if len(runs) == 0:
                print('Nothing is running right now.')
            else:
                print('We have %d run(s) right now:' % len(runs))
                for run_id, agents in runs.items():
                    print(' - %s with %d agent(s)' % (run_id, len(agents)))
            sys.exit(0)
        elif args.check_cluster:
            # run the canonical health-check test on every registered agent
            args.fqn = 'loads.examples.test_blog.TestWebSite.test_health'
            args.agents = len(ping['agents'])
            args.hits = '1'
            # typo fixed: 'healt' -> 'health'
            print('Running a health check on all %d agents' % args.agents)

    # if we don't have an fqn or we're not attached, something's wrong
    if args.fqn is None and not args.attach:
        parser.print_usage()
        sys.exit(0)

    args = dict(args._get_kwargs())
    res = run(args)
    return res
def main(sysargs=None):
    """Command-line entry point.

    Parses the arguments, handles the broker maintenance flags
    (--ping-broker / --purge-broker / --check-cluster), then delegates
    the actual load test to run().  Returns run()'s result.
    """
    # parsing the command line
    args, parser = _parse(sysargs)

    # loggers setting
    wslogger = logging.getLogger('ws4py')
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    wslogger.addHandler(ch)
    set_logger()

    if args.version:
        print(__version__)
        sys.exit(0)

    if args.ping_broker or args.purge_broker or args.check_cluster:
        client = Client(args.broker)
        ping = client.ping()

        if args.purge_broker:
            runs = client.purge_broker()
            if len(runs) == 0:
                print('Nothing to purge.')
            else:
                print('We have %d run(s) right now:' % len(runs))
                print('Purged.')
            sys.exit(0)
        elif args.ping_broker:
            print('Broker running on pid %d' % ping['pid'])
            print('%d agents registered' % len(ping['agents']))
            print('endpoints:')
            for name, location in ping['endpoints'].items():
                print(' - %s: %s' % (name, location))
            runs = client.list_runs()
            if len(runs) == 0:
                print('Nothing is running right now.')
            else:
                print('We have %d run(s) right now:' % len(runs))
                for run_id, agents in runs.items():
                    print(' - %s with %d agent(s)' % (run_id, len(agents)))
            sys.exit(0)
        elif args.check_cluster:
            # run the canonical health-check test on every registered agent
            args.fqn = 'loads.examples.test_blog.TestWebSite.test_health'
            args.agents = len(ping['agents'])
            args.hits = '1'
            # typo fixed: 'healt' -> 'health'
            print('Running a health check on all %d agents' % args.agents)

    # if we don't have an fqn or we're not attached, something's wrong
    if args.fqn is None and not args.attach:
        parser.print_usage()
        sys.exit(0)

    args = dict(args._get_kwargs())
    res = run(args)
    return res
def run(args):
    """Run a load test, locally or distributed, according to *args*.

    args -- dict of parsed command-line options.
    Returns the runner's exit status, or 1 when attaching fails early.
    """
    is_slave = args.get('slave', False)
    has_agents = args.get('agents', None)
    attach = args.get('attach', False)

    if not attach and (is_slave or not has_agents):
        # purely local run: pick the external or the in-process runner
        if args.get('test_runner', None) is not None:
            runner = ExternalRunner
        else:
            runner = LocalRunner
        try:
            return runner(args).execute()
        except Exception:
            # parenthesized form is valid on both Python 2 and 3
            # (was the py2-only statement "print traceback.format_exc()")
            print(traceback.format_exc())
            raise
    else:
        if attach:
            # find out what's running
            client = Client(args['broker'])
            try:
                runs = client.list_runs()
            except TimeoutError:
                logger.info("Can't reach the broker at %r" % args['broker'])
                client.close()
                return 1

            if len(runs) == 0:
                logger.info("Nothing seem to be running on that broker.")
                client.close()
                return 1
            elif len(runs) == 1:
                # list() keeps this working on Python 3 dict views as well
                run_id, run_data = list(runs.items())[0]
                __, started = run_data[-1]
            else:
                # we need to pick one
                raise NotImplementedError()

            counts = client.get_counts(run_id)
            events = [event for event, hits in counts]
            if 'stopTestRun' in events:
                logger.info("This test has just stopped.")
                client.close()
                return 1

            metadata = client.get_metadata(run_id)
            logger.debug('Reattaching run %r' % run_id)
            started = datetime.utcfromtimestamp(started)
            runner = DistributedRunner(args)
            try:
                return runner.attach(run_id, started, counts, metadata)
            except KeyboardInterrupt:
                _detach_question(runner)
        else:
            logger.debug('Summoning %d agents' % args['agents'])
            runner = DistributedRunner(args)
            try:
                return runner.execute()
            except KeyboardInterrupt:
                _detach_question(runner)
def __init__(self, broker=DEFAULT_FRONTEND, ping_delay=10., ping_retries=3,
             params=None, timeout=DEFAULT_TIMEOUT_MOVF,
             max_age=DEFAULT_MAX_AGE, max_age_delta=DEFAULT_MAX_AGE_DELTA):
    """Initialize the agent: ask the broker for its endpoints, then wire
    the zmq sockets and streams used to talk to it.

    broker -- endpoint of the broker frontend
    ping_delay, ping_retries -- heartbeat tuning
    params -- opaque options handed to workers
    timeout, max_age, max_age_delta -- worker lifecycle tuning
    """
    logger.debug('Initializing the agent.')
    self.debug = logger.isEnabledFor(logging.DEBUG)
    self.params = params
    self.pid = os.getpid()
    # unique agent id: hostname + pid
    self.agent_id = '%s-%s' % (get_hostname(), self.pid)
    self.timeout = timeout
    self.max_age = max_age
    self.max_age_delta = max_age_delta
    self.env = os.environ.copy()
    self.running = False
    self._workers = {}
    self._max_id = defaultdict(int)

    # Let's ask the broker its options
    self.broker = broker
    client = Client(self.broker)

    # this will timeout in case the broker is unreachable
    result = client.ping()
    self.endpoints = result['endpoints']

    # Setup the zmq sockets
    self.loop = ioloop.IOLoop()
    self.ctx = zmq.Context()

    # backend socket - used to receive work from the broker
    self._backend = self.ctx.socket(zmq.ROUTER)
    self._backend.identity = self.agent_id
    self._backend.connect(self.endpoints['backend'])

    # register socket - used to register into the broker
    self._reg = self.ctx.socket(zmq.PUSH)
    self._reg.connect(self.endpoints['register'])

    # hearbeat socket - used to check if the broker is alive
    heartbeat = self.endpoints.get('heartbeat')
    if heartbeat is not None:
        logger.info("Hearbeat activated")
        self.ping = Stethoscope(heartbeat, onbeatlost=self.lost,
                                delay=ping_delay, retries=ping_retries,
                                ctx=self.ctx, io_loop=self.loop,
                                onregister=self.register)
    else:
        self.ping = None

    # Setup the zmq streams.
    self._backstream = zmqstream.ZMQStream(self._backend, self.loop)
    self._backstream.on_recv(self._handle_recv_back)

    # periodically check the worker processes
    self._check = ioloop.PeriodicCallback(self._check_proc,
                                          ping_delay * 1000,
                                          io_loop=self.loop)
def verify_broker(broker_endpoint=DEFAULT_FRONTEND, timeout=1.):
    """Check whether a broker answers at *broker_endpoint*.

    Returns the ping result (truthy) when the broker responds within
    *timeout* seconds, None when the ping times out.
    """
    from loads.transport.client import Client
    probe = Client(broker_endpoint)
    try:
        try:
            return probe.ping(timeout=timeout, log_exceptions=False)
        except TimeoutError:
            return None
    finally:
        probe.close()
def verify_broker(broker_endpoint=DEFAULT_FRONTEND, timeout=1.):
    """Check whether a broker answers at *broker_endpoint*.

    Returns the ping result (truthy) when the broker responds within
    *timeout* seconds, None when the ping times out.
    """
    from loads.transport.client import Client
    probe = Client(broker_endpoint)
    try:
        try:
            return probe.ping(timeout=timeout, log_exceptions=False)
        except TimeoutError:
            return None
    finally:
        probe.close()
def sync(self, run_id):
    """Refresh self.counts with the latest per-type counts for *run_id*.

    No-op for purely local runs (no agents configured).
    """
    if self.args.get('agents') is None:
        return
    self.run_id = run_id
    # we're asking the broker about the latest counts
    self.counts = defaultdict(int)
    broker_client = Client(self.args['broker'])
    for record in broker_client.get_data(run_id, groupby=True):
        self.counts[record['data_type']] += record['count']
def sync(self, run_id):
    """Refresh self.counts with the latest per-type counts for *run_id*.

    No-op for purely local runs (no agents configured).
    """
    if self.args.get('agents') is None:
        return
    self.run_id = run_id
    # we're asking the broker about the latest counts
    self.counts = defaultdict(int)
    broker_client = Client(self.args['broker'])
    for record in broker_client.get_data(run_id, groupby=True):
        self.counts[record['data_type']] += record['count']
def _get_values(self, name): if name in 'failures': key = 'addFailure' elif name == 'errors': key = 'addError' else: raise NotImplementedError(name) client = Client(self.args['broker']) for line in client.get_data(self.run_id, data_type=key): line = line['exc_info'] yield [line]
def _get_values(self, name): """Calls the broker to get the errors or failures. """ if name in 'failures': key = 'addFailure' elif name == 'errors': key = 'addError' client = Client(self.args['broker']) for line in client.get_data(self.run_id, data_type=key): line = line['exc_info'] yield [line]
def _get_values(self, name): """Calls the broker to get the errors or failures. """ if name in 'failures': key = 'addFailure' elif name == 'errors': key = 'addError' client = Client(self.args['broker']) for line in client.get_data(self.run_id, data_type=key): line = line['exc_info'] yield [line]
def start_servers():
    """Spawn the broker, three agents and the example echo server.

    Idempotent: a second call is a no-op once the cluster is running.
    """
    global _RUNNING
    if _RUNNING:
        return

    start_process('loads.transport.broker')
    for _ in range(3):
        start_process('loads.transport.agent')
    start_process('loads.examples.echo_server')

    # wait for the echo server to be started
    attempts = 0
    while True:
        try:
            requests.get('http://0.0.0.0:9000')
        except requests.ConnectionError:
            time.sleep(.3)
            attempts += 1
            if attempts > 3:
                raise
        else:
            break

    # wait for the broker to be up with 3 slaves.
    probe = Client()
    while len(probe.list()) != 3:
        time.sleep(.1)

    # control that the broker is responsive
    probe.ping()
    for wid in probe.list():
        assert probe.status(wid) == {}
    probe.close()

    _RUNNING = True
def setUpClass(cls):
    """Start the test cluster once for the whole class (skipped on Travis)."""
    if 'TRAVIS' in os.environ:
        return
    start_servers()
    cls.client = Client()
    cls.dirs = []
    cls.location = os.getcwd()
def _execute(self):
    """Drive a distributed run: hand the job to the broker, then pump
    the io loop until all results have been received."""
    # calling the clients now
    self.test_result.startTestRun()
    # refresh the outputs every 100ms while the loop runs
    cb = ioloop.PeriodicCallback(self.refresh, 100, self.loop)
    cb.start()
    try:
        client = Client(self.args['broker'])
        logger.debug('Calling the broker...')
        client.run(self.args)
        logger.debug('Waiting for results')
        self.loop.start()
    finally:
        # end..
        cb.stop()
        self.test_result.stopTestRun()
        self.context.destroy()
        self.flush()
def setUpClass(cls):
    """Start the cluster and a python-backed BrokerDB (skipped on Travis)."""
    if 'TRAVIS' in os.environ:
        return
    start_servers()
    cls.client = Client()
    cls.location = os.getcwd()
    cls.dirs = []
    cls.loop = ioloop.IOLoop()
    cls.db = BrokerDB(cls.loop, db='python')
def _init(self):
    """(Re)create the database connector and the broker client."""
    # close any previous connector
    if self.client is not None:
        try:
            self.close()
        except zmq.ZMQError:
            # socket may already be gone; reconnecting anyway
            pass
    self.db = get_database(self.backend, **self.dboptions)
    self.client = Client(self.broker, timeout_max_overflow=20.)
def health_check(self):
    """Launch a cluster-wide health check on all idle agents.

    Returns a (success, message, agents_used) tuple.
    """
    checker = Client(self.broker, timeout_max_overflow=20.)
    ping = checker.ping()

    registered = len(ping['agents'])
    if registered == 0:
        return False, 'No agents currently registered.', 0

    # count agents currently attached to a run
    busy = 0
    for _run_id, run_agents in checker.list_runs().items():
        busy += len(run_agents)

    idle = registered - busy
    if idle == 0:
        # no agents are available.
        return False, 'All agents are busy.', 0

    args = {'fqn': 'loads.examples.test_blog.TestWebSite.test_health',
            'hits': '1', 'agents': idle, 'users': '1', 'detach': True}
    checker.run(args)
    return True, 'Health check launched', idle
def client(self):
    """Lazily build (and cache) the broker client, honouring --ssh."""
    cached = self._client
    if cached is None:
        cached = Client(self.args['broker'], ssh=self.args.get('ssh'))
        self._client = cached
    return cached
def __init__(self, broker=DEFAULT_FRONTEND, receiver=DEFAULT_AGENT_RECEIVER,
             ping_delay=10., ping_retries=3,
             params=None, timeout=DEFAULT_TIMEOUT_MOVF,
             max_age=DEFAULT_MAX_AGE, max_age_delta=DEFAULT_MAX_AGE_DELTA):
    """Initialize the agent: ask the broker for its endpoints, then wire
    the zmq sockets/streams used to receive work and push back results.

    broker -- endpoint of the broker frontend
    receiver -- template for the per-agent results socket
    ping_delay, ping_retries -- heartbeat tuning
    params -- opaque options handed to workers
    timeout, max_age, max_age_delta -- worker lifecycle tuning
    """
    logger.debug('Initializing the agent.')
    self.debug = logger.isEnabledFor(logging.DEBUG)
    self.params = params
    self.pid = os.getpid()
    self.timeout = timeout
    self.max_age = max_age
    self.max_age_delta = max_age_delta
    self.delayed_exit = None
    self.env = os.environ.copy()
    self.running = False
    self._workers = {}
    self._run_args = {}
    self._run_started_at = {}
    self._max_id = defaultdict(int)
    # run lifecycle counters
    self._started = self._stopped = self._launched = 0
    self.loop = ioloop.IOLoop()
    self.ctx = zmq.Context()

    # Setup the zmq sockets
    # Let's ask the broker its options
    self.broker = broker
    client = Client(self.broker)
    result = client.ping()
    self.endpoints = result['endpoints']

    # backend socket - used to receive work from the broker
    self._backend = self.ctx.socket(zmq.REP)
    self._backend.identity = str(self.pid)
    self._backend.connect(self.endpoints['backend'])

    # register socket - used to register into the broker
    self._reg = self.ctx.socket(zmq.PUSH)
    self._reg.connect(self.endpoints['register'])

    # receiver socket - used to receive results from workers
    self._receiver_socket = receiver.format(pid=self.pid)
    register_ipc_file(self._receiver_socket)
    self._receiver = self.ctx.socket(zmq.PULL)
    self._receiver.bind(self._receiver_socket)

    # push socket - used to send back results to the broker
    self._push = self.ctx.socket(zmq.PUSH)
    self._push.set_hwm(8096 * 10)
    self._push.setsockopt(zmq.LINGER, -1)
    self._push.connect(self.endpoints['receiver'])

    # hearbeat socket - used to check if the broker is alive
    heartbeat = self.endpoints.get('heartbeat')
    if heartbeat is not None:
        self.ping = Stethoscope(heartbeat, onbeatlost=self.lost,
                                delay=ping_delay, retries=ping_retries,
                                ctx=self.ctx, io_loop=self.loop,
                                onregister=self.register)
    else:
        self.ping = None

    # Setup the zmq streams.
    self._backstream = zmqstream.ZMQStream(self._backend, self.loop)
    self._backstream.on_recv(self._handle_recv_back)
    self._rcvstream = zmqstream.ZMQStream(self._receiver, self.loop)
    self._rcvstream.on_recv(self._handle_events)
    # periodically check the worker processes
    self._check = ioloop.PeriodicCallback(self._check_proc,
                                          ping_delay * 1000,
                                          io_loop=self.loop)
def run(args):
    """Run a load test, locally or distributed, according to *args*.

    args -- dict of parsed command-line options.
    Returns the runner's exit status, or 1 when attaching fails early.
    """
    is_slave = args.get('slave', False)
    has_agents = args.get('agents', None)
    attach = args.get('attach', False)

    if not attach and (is_slave or not has_agents):
        # purely local run: pick the external or the in-process runner
        if args.get('test_runner', None) is not None:
            runner = ExternalRunner
        else:
            runner = LocalRunner
        try:
            return runner(args).execute()
        except Exception:
            # parenthesized form is valid on both Python 2 and 3
            # (was the py2-only statement "print traceback.format_exc()")
            print(traceback.format_exc())
            raise
    else:
        if attach:
            # find out what's running
            client = Client(args['broker'])
            try:
                runs = client.list_runs()
            except TimeoutError:
                logger.info("Can't reach the broker at %r" % args['broker'])
                client.close()
                return 1

            if len(runs) == 0:
                logger.info("Nothing seem to be running on that broker.")
                client.close()
                return 1
            elif len(runs) == 1:
                # list() keeps this working on Python 3 dict views as well
                run_id, run_data = list(runs.items())[0]
                __, started = run_data[-1]
            else:
                # we need to pick one
                raise NotImplementedError()

            counts = client.get_counts(run_id)
            events = [event for event, hits in counts]
            if 'stopTestRun' in events:
                logger.info("This test has just stopped.")
                client.close()
                return 1

            metadata = client.get_metadata(run_id)
            logger.debug('Reattaching run %r' % run_id)
            started = datetime.utcfromtimestamp(started)
            runner = DistributedRunner(args)
            try:
                return runner.attach(run_id, started, counts, metadata)
            except KeyboardInterrupt:
                _detach_question(runner)
        else:
            logger.debug('Summoning %d agents' % args['agents'])
            runner = DistributedRunner(args)
            try:
                return runner.execute()
            except KeyboardInterrupt:
                _detach_question(runner)
def main(sysargs=None):
    """Command-line entry point.

    Builds the argument parser, merges a --config file when given,
    handles the broker maintenance flags, then delegates the actual
    load test to run().  Returns run()'s result.
    """
    if sysargs is None:
        sysargs = sys.argv[1:]

    parser = argparse.ArgumentParser(description='Runs a load test.')
    parser.add_argument('fqn', help='Fully Qualified Name of the test',
                        nargs='?')
    parser.add_argument('--config', help='Configuration file to read',
                        type=str, default=None)
    parser.add_argument('-u', '--users', help='Number of virtual users',
                        type=str, default='1')
    parser.add_argument('--test-dir', help='Directory to run the test from',
                        type=str, default=None)
    parser.add_argument('--python-dep', help='Python (PyPI) dependencies '
                                             'to install',
                        action='append', default=[])
    parser.add_argument('--include-file',
                        help='File(s) to include (needed for the test) '
                             '- glob-style',
                        action='append', default=[])

    # loads works with hits or duration
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--hits', help='Number of hits per user',
                       type=str, default=None)
    group.add_argument('-d', '--duration', help='Duration of the test (s)',
                       type=int, default=None)

    parser.add_argument('--version', action='store_true', default=False,
                        help='Displays Loads version and exits.')
    parser.add_argument('--test-runner', default=None,
                        help='The path to binary to use as the test runner '
                             'when in distributed mode. The default is '
                             'this (python) runner')
    parser.add_argument('--server-url', default=None,
                        help='The URL of the server you want to test. It '
                             'will override any value your provided in '
                             'the tests for the WebTest client.')
    parser.add_argument('--observer', action='append',
                        help='Callable that will receive the final results. '
                             'Only in distributed mode (runs on the broker)')
    parser.add_argument('--no-patching',
                        help='Deactivate Gevent monkey patching.',
                        action='store_true', default=False)

    #
    # distributed options
    #
    parser.add_argument('-a', '--agents', help='Number of agents to use.',
                        type=int)
    parser.add_argument('--zmq-receiver', default=None,
                        help=('ZMQ socket where the runners send the events to'
                              ' (opened on the agent side).'))
    parser.add_argument('--zmq-publisher', default=DEFAULT_PUBLISHER,
                        help='ZMQ socket where the test results messages '
                             'are published.')
    parser.add_argument('--ping-broker', action='store_true', default=False,
                        help='Pings the broker to get info, display it and '
                             'exits.')
    parser.add_argument('--check-cluster', action='store_true', default=False,
                        help='Runs a test on all agents then exits.')
    parser.add_argument('--purge-broker', action='store_true', default=False,
                        help='Stops all runs on the broker and exits.')
    parser.add_argument('-b', '--broker', help='Broker endpoint',
                        default=DEFAULT_FRONTEND)

    outputs = [st.name for st in output_list()]
    outputs.sort()

    parser.add_argument('--quiet', action='store_true', default=False,
                        help='Do not print any log messages.')
    parser.add_argument('--output', action='append', default=['stdout'],
                        help='The output which will get the results',
                        choices=outputs)
    parser.add_argument('--attach', help='Reattach to a distributed run',
                        action='store_true', default=False)
    # typo fixed in the help text: 'immediatly' -> 'immediately'
    parser.add_argument('--detach', help='Detach immediately the current '
                                         'distributed run',
                        action='store_true', default=False)

    # Adds the per-output and per-runner options.
    add_options(RUNNERS, parser, fmt='--{name}-{option}')
    add_options(output_list(), parser, fmt='--output-{name}-{option}')

    args = parser.parse_args(sysargs)

    if args.config is not None:
        # second pass !
        config = Config(args.config)
        config_args = config.scan_args(parser, strip_prefixes=['loads'])
        if 'fqn' in config['loads']:
            config_args += [config['loads']['fqn']]
        args = parser.parse_args(args=sysargs + config_args)

    if args.quiet and 'stdout' in args.output:
        args.output.remove('stdout')

    # loggers setting
    wslogger = logging.getLogger('ws4py')
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    wslogger.addHandler(ch)
    set_logger()

    if args.version:
        print(__version__)
        sys.exit(0)

    if args.ping_broker:
        client = Client(args.broker)
        res = client.ping()
        print('Broker running on pid %d' % res['pid'])
        print('%d agents registered' % len(res['agents']))
        print('endpoints:')
        for name, location in res['endpoints'].items():
            print(' - %s: %s' % (name, location))
        runs = client.list_runs()
        if len(runs) == 0:
            print('Nothing is running right now.')
        else:
            print('We have %d run(s) right now:' % len(runs))
            for run_id, agents in runs.items():
                print(' - %s with %d agent(s)' % (run_id, len(agents)))
        sys.exit(0)

    if args.purge_broker:
        client = Client(args.broker)
        runs = client.list_runs()
        if len(runs) == 0:
            print('Nothing to purge.')
        else:
            print('We have %d run(s) right now:' % len(runs))
            for run_id, workers in runs.items():
                print('Purging %s...' % run_id)
                client.stop_run(run_id)
            print('Purged.')
        sys.exit(0)

    if args.check_cluster:
        # run the canonical health-check test on every registered agent
        args.fqn = 'loads.examples.test_blog.TestWebSite.test_health'
        client = Client(args.broker)
        res = client.ping()
        args.agents = len(res['agents'])
        args.hits = '1'
        # typo fixed: 'healt' -> 'health'
        print('Running a health check on all %d agents' % args.agents)

    # if we don't have an fqn or we're not attached, something's wrong
    if args.fqn is None and not args.attach:
        parser.print_usage()
        sys.exit(0)

    args = dict(args._get_kwargs())
    res = run(args)
    return res
def main(sysargs=None):
    """Command-line entry point (ssh-aware variant).

    Handles --ssh tunnelling setup, the broker maintenance flags, then
    delegates the actual load test to run().  Returns run()'s result.
    """
    # parsing the command line
    args, parser = _parse(sysargs)

    # loggers setting
    wslogger = logging.getLogger('ws4py')
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    wslogger.addHandler(ch)
    set_logger()

    if args.version:
        print(__version__)
        sys.exit(0)

    if args.ssh:
        if args.broker == DEFAULT_FRONTEND:
            args.broker = DEFAULT_SSH_FRONTEND
        # control that we have pexpect
        try:
            import pexpect      # NOQA
        except ImportError:
            print("To use --ssh you need pexpect")
            print("Try: pip install pexpect")
            sys.exit(0)

    if args.ping_broker or args.purge_broker or args.check_cluster:
        client = Client(args.broker, ssh=args.ssh)
        ping = client.ping()

        if args.purge_broker:
            runs = client.purge_broker()
            if len(runs) == 0:
                print('Nothing to purge.')
            else:
                print('We have %d run(s) right now:' % len(runs))
                print('Purged.')
            sys.exit(0)
        elif args.ping_broker:
            print('Broker running on pid %d' % ping['pid'])
            agents = ping['agents']
            print('%d agents registered' % len(agents))
            # sorted() replaces the py2-only "items(); .sort()" pattern —
            # same output, but works with Python 3 dict views too
            for agent_id, agent_info in sorted(agents.items()):
                print(' - %s on %s' % (agent_info['pid'],
                                       agent_info['hostname']))
            print('endpoints:')
            for name, location in sorted(ping['endpoints'].items()):
                print(' - %s: %s' % (name, location))
            runs = client.list_runs()
            if len(runs) == 0:
                print('Nothing is running right now.')
            else:
                print('We have %d run(s) right now:' % len(runs))
                for run_id, agents in runs.items():
                    print(' - %s with %d agent(s)' % (run_id, len(agents)))
            sys.exit(0)
        elif args.check_cluster:
            total_agents = len(ping['agents'])
            if total_agents == 0:
                print('No agents currently registered.')
                sys.exit(0)
            runs = client.list_runs().items()
            busy_agents = sum([len(agents) for run_id, agents in runs])
            avail = total_agents - busy_agents
            if avail == 0:
                # no agents are available.
                print('All agents are busy.')
                sys.exit(0)
            args.fqn = 'loads.examples.test_blog.TestWebSite.test_health'
            args.agents = avail
            args.hits = '1'
            print('Running a health check on all %d agents' % args.agents)

    # if we don't have an fqn or we're not attached, something's wrong
    if args.fqn is None and not args.attach:
        parser.print_usage()
        sys.exit(0)

    args = dict(args._get_kwargs())
    res = run(args)
    return res
def main(sysargs=None):
    """Command-line entry point (ssh-aware variant).

    Handles --ssh tunnelling setup, the broker maintenance flags, then
    delegates the actual load test to run().  Returns run()'s result.
    """
    # parsing the command line
    args, parser = _parse(sysargs)

    # loggers setting
    wslogger = logging.getLogger('ws4py')
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    wslogger.addHandler(ch)
    set_logger()

    if args.version:
        print(__version__)
        sys.exit(0)

    if args.ssh:
        if args.broker == DEFAULT_FRONTEND:
            args.broker = DEFAULT_SSH_FRONTEND
        # control that we have pexpect
        try:
            import pexpect      # NOQA
        except ImportError:
            print("To use --ssh you need pexpect")
            print("Try: pip install pexpect")
            sys.exit(0)

    if args.ping_broker or args.purge_broker or args.check_cluster:
        client = Client(args.broker, ssh=args.ssh)
        ping = client.ping()

        if args.purge_broker:
            runs = client.purge_broker()
            if len(runs) == 0:
                print('Nothing to purge.')
            else:
                print('We have %d run(s) right now:' % len(runs))
                print('Purged.')
            sys.exit(0)
        elif args.ping_broker:
            print('Broker running on pid %d' % ping['pid'])
            agents = ping['agents']
            print('%d agents registered' % len(agents))
            # sorted() replaces the py2-only "items(); .sort()" pattern —
            # same output, but works with Python 3 dict views too
            for agent_id, agent_info in sorted(agents.items()):
                print(' - %s on %s' % (agent_info['pid'],
                                       agent_info['hostname']))
            print('endpoints:')
            for name, location in sorted(ping['endpoints'].items()):
                print(' - %s: %s' % (name, location))
            runs = client.list_runs()
            if len(runs) == 0:
                print('Nothing is running right now.')
            else:
                print('We have %d run(s) right now:' % len(runs))
                for run_id, agents in runs.items():
                    print(' - %s with %d agent(s)' % (run_id, len(agents)))
            sys.exit(0)
        elif args.check_cluster:
            total_agents = len(ping['agents'])
            if total_agents == 0:
                print('No agents currently registered.')
                sys.exit(0)
            runs = client.list_runs().items()
            busy_agents = sum([len(agents) for run_id, agents in runs])
            avail = total_agents - busy_agents
            if avail == 0:
                # no agents are available.
                print('All agents are busy.')
                sys.exit(0)
            args.fqn = 'loads.examples.test_blog.TestWebSite.test_health'
            args.agents = avail
            args.hits = '1'
            print('Running a health check on all %d agents' % args.agents)

    # if we don't have an fqn or we're not attached, something's wrong
    if args.fqn is None and not args.attach:
        parser.print_usage()
        sys.exit(0)

    args = dict(args._get_kwargs())
    res = run(args)
    return res
def client(self):
    """Lazily build (and cache) the broker client."""
    cached = self._client
    if cached is None:
        cached = Client(self.args['broker'])
        self._client = cached
    return cached
def __init__(self):
    """Wire up the result database and the broker client from settings."""
    backend = settings.LOADS_DATABASE['backend']
    options = settings.LOADS_DATABASE['options']
    self.db = get_database(backend, **options)
    self.client = Client(settings.LOADS_BROKER)
def main(sysargs=None):
    """Command-line entry point.

    Builds the argument parser, merges a --config file when given,
    handles the broker maintenance flags, then delegates the actual
    load test to run().  Returns run()'s result.
    """
    if sysargs is None:
        sysargs = sys.argv[1:]

    parser = argparse.ArgumentParser(description='Runs a load test.')
    parser.add_argument('fqn', help='Fully Qualified Name of the test',
                        nargs='?')
    parser.add_argument('--config', help='Configuration file to read',
                        type=str, default=None)
    parser.add_argument('-u', '--users', help='Number of virtual users',
                        type=str, default='1')
    parser.add_argument('--test-dir', help='Directory to run the test from',
                        type=str, default=None)
    parser.add_argument('--python-dep', help='Python (PyPI) dependencies '
                                             'to install',
                        action='append', default=[])
    parser.add_argument('--include-file',
                        help='File(s) to include (needed for the test) '
                             '- glob-style',
                        action='append', default=[])

    # loads works with hits or duration
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--hits', help='Number of hits per user',
                       type=str, default=None)
    group.add_argument('-d', '--duration', help='Duration of the test (s)',
                       type=int, default=None)

    parser.add_argument('--version', action='store_true', default=False,
                        help='Displays Loads version and exits.')
    parser.add_argument('--test-runner', default=None,
                        help='The path to binary to use as the test runner '
                             'when in distributed mode. The default is '
                             'this (python) runner')
    parser.add_argument('--server-url', default=None,
                        help='The URL of the server you want to test. It '
                             'will override any value your provided in '
                             'the tests for the WebTest client.')
    parser.add_argument('--observer', action='append',
                        help='Callable that will receive the final results. '
                             'Only in distributed mode (runs on the broker)')
    parser.add_argument('--no-patching',
                        help='Deactivate Gevent monkey patching.',
                        action='store_true', default=False)

    #
    # distributed options
    #
    parser.add_argument('-a', '--agents', help='Number of agents to use.',
                        type=int)
    parser.add_argument('--zmq-receiver', default=None,
                        help=('ZMQ socket where the runners send the events to'
                              ' (opened on the agent side).'))
    parser.add_argument('--zmq-publisher', default=DEFAULT_PUBLISHER,
                        help='ZMQ socket where the test results messages '
                             'are published.')
    parser.add_argument('--ping-broker', action='store_true', default=False,
                        help='Pings the broker to get info, display it and '
                             'exits.')
    parser.add_argument('--check-cluster', action='store_true', default=False,
                        help='Runs a test on all agents then exits.')
    parser.add_argument('--purge-broker', action='store_true', default=False,
                        help='Stops all runs on the broker and exits.')
    parser.add_argument('-b', '--broker', help='Broker endpoint',
                        default=DEFAULT_FRONTEND)

    outputs = [st.name for st in output_list()]
    outputs.sort()

    parser.add_argument('--quiet', action='store_true', default=False,
                        help='Do not print any log messages.')
    parser.add_argument('--output', action='append', default=['stdout'],
                        help='The output which will get the results',
                        choices=outputs)
    parser.add_argument('--attach', help='Reattach to a distributed run',
                        action='store_true', default=False)
    # typo fixed in the help text: 'immediatly' -> 'immediately'
    parser.add_argument('--detach', help='Detach immediately the current '
                                         'distributed run',
                        action='store_true', default=False)

    # Adds the per-output and per-runner options.
    add_options(RUNNERS, parser, fmt='--{name}-{option}')
    add_options(output_list(), parser, fmt='--output-{name}-{option}')

    args = parser.parse_args(sysargs)

    if args.config is not None:
        # second pass !
        config = Config(args.config)
        config_args = config.scan_args(parser, strip_prefixes=['loads'])
        if 'fqn' in config['loads']:
            config_args += [config['loads']['fqn']]
        args = parser.parse_args(args=sysargs + config_args)

    if args.quiet and 'stdout' in args.output:
        args.output.remove('stdout')

    # loggers setting
    wslogger = logging.getLogger('ws4py')
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    wslogger.addHandler(ch)
    set_logger()

    if args.version:
        print(__version__)
        sys.exit(0)

    if args.ping_broker:
        client = Client(args.broker)
        res = client.ping()
        print('Broker running on pid %d' % res['pid'])
        print('%d agents registered' % len(res['agents']))
        print('endpoints:')
        for name, location in res['endpoints'].items():
            print(' - %s: %s' % (name, location))
        runs = client.list_runs()
        if len(runs) == 0:
            print('Nothing is running right now.')
        else:
            print('We have %d run(s) right now:' % len(runs))
            for run_id, agents in runs.items():
                print(' - %s with %d agent(s)' % (run_id, len(agents)))
        sys.exit(0)

    if args.purge_broker:
        client = Client(args.broker)
        runs = client.list_runs()
        if len(runs) == 0:
            print('Nothing to purge.')
        else:
            print('We have %d run(s) right now:' % len(runs))
            for run_id, workers in runs.items():
                print('Purging %s...' % run_id)
                client.stop_run(run_id)
            print('Purged.')
        sys.exit(0)

    if args.check_cluster:
        # run the canonical health-check test on every registered agent
        args.fqn = 'loads.examples.test_blog.TestWebSite.test_health'
        client = Client(args.broker)
        res = client.ping()
        args.agents = len(res['agents'])
        args.hits = '1'
        # typo fixed: 'healt' -> 'health'
        print('Running a health check on all %d agents' % args.agents)

    # if we don't have an fqn or we're not attached, something's wrong
    if args.fqn is None and not args.attach:
        parser.print_usage()
        sys.exit(0)

    args = dict(args._get_kwargs())
    res = run(args)
    return res
def setUpClass(cls):
    """Boot the test cluster once and remember the spawned processes."""
    cls.procs = start_servers()
    cls.client = Client()
    cls.dirs = []
    cls.location = os.getcwd()
class Controller(object):
    """Front-end over a loads cluster.

    Wraps a broker ``Client`` and a results database (``get_database``)
    and exposes run management (stop, status, listing) plus aggregated
    per-run reporting via :meth:`get_run_info`.
    """

    def __init__(self, db='redis', dboptions=None, broker=None):
        # Avoid the mutable-default pitfall: build a fresh dict per instance.
        if dboptions is None:
            self.dboptions = {}
        else:
            self.dboptions = dboptions
        self.broker = broker
        self.backend = db
        self.db = None
        self.client = None
        self._init()

    def reconnect(self):
        """Drop and re-create the database and broker connections."""
        self._init()

    def _init(self):
        # Close any previous connector before reconnecting; a ZMQError on
        # close just means the socket was already gone.
        if self.client is not None:
            try:
                self.close()
            except zmq.ZMQError:
                pass
        self.db = get_database(self.backend, **self.dboptions)
        self.client = Client(self.broker, timeout_max_overflow=20.)

    def health_check(self):
        """Launch a detached health-check run on every idle agent.

        Returns a ``(ok, message, available_agents)`` tuple; ``ok`` is
        False when no agent is registered or all of them are busy.
        """
        client = Client(self.broker, timeout_max_overflow=20.)
        ping = client.ping()
        total_agents = len(ping['agents'])
        if total_agents == 0:
            msg = 'No agents currently registered.'
            return False, msg, 0
        runs = client.list_runs().items()
        busy_agents = sum(len(agents) for run_id, agents in runs)
        avail = total_agents - busy_agents
        if avail == 0:
            # no agents are available.
            msg = 'All agents are busy.'
            return False, msg, 0
        args = {'fqn': 'loads.examples.test_blog.TestWebSite.test_health',
                'hits': '1',
                'agents': avail,
                'users': '1',
                'detach': True}
        client.run(args)
        return True, 'Health check launched', avail

    def close(self):
        """Close the broker client and the database connection."""
        self.client.close()
        self.db.close()

    def stop(self, run_id):
        """Stop the given run on the broker."""
        self.client.stop_run(run_id)
        self.get_broker_info()

    def agent_status(self, agent_id):
        return self.client.status(agent_id)

    def ping_db(self):
        return self.db.ping()

    def get_broker_info(self):
        return self.client.ping()

    def get_runs(self, **filters):
        """Return run ids, optionally filtered by metadata equality.

        With no filters the raw database listing is returned; otherwise a
        run is included once as soon as any filter key matches.
        """
        if not filters:
            return self.db.get_runs()
        runs = []
        for run_id in self.db.get_runs():
            metadata = self.db.get_metadata(run_id)
            for key, value in filters.items():
                if key not in metadata:
                    continue
                if metadata[key] == value:
                    runs.append(run_id)
                    break  # one match is enough; avoid duplicates
        return runs

    def get_run_info(self, run_id, data=True):
        """Aggregate data, errors, counters and metadata for one run.

        Returns a dict with ``data`` (when requested), ``errors``
        (deduplicated by traceback hash), ``custom`` (non-standard
        counters), ``counts`` and ``metadata``.
        """
        result = {}
        # we need to batch XXX
        if data:
            data = self.db.get_data(run_id, size=100)
            result['data'] = data
        # Deduplicate errors by the md5 of the error text, keeping a count
        # and one representative traceback per distinct error.
        # NOTE(review): md5() requires bytes on Python 3 — assumes
        # `error` is bytes coming out of the DB; verify against the store.
        errors = {}
        for line in self.db.get_errors(run_id, size=100):
            error, tb, tb2 = line['exc_info']
            hashed = md5(error).hexdigest()
            if hashed in errors:
                old_count, tb = errors[hashed]
                errors[hashed] = old_count + 1, tb
            else:
                errors[hashed] = 1, tb + '\n' + tb2
        # Py3 fix: dict.items() is a view with no .sort(); sorted() works
        # on both Python 2 and 3 and yields the same ordering.
        errors = sorted(errors.items())
        result['errors'] = errors
        counts = self.db.get_counts(run_id)
        # Split counters the dashboard knows about (_COUNTS) from
        # test-defined custom counters.
        custom = {}
        for key, value in list(counts.items()):
            if key in _COUNTS:
                continue
            custom[key] = value
            del counts[key]
        result['custom'] = custom
        metadata = self.db.get_metadata(run_id)
        started = metadata.get('started')
        ended = metadata.get('ended', time.time())
        active = metadata.get('active', False)
        # approximate — should be set by the broker
        if started is not None:
            if active:
                elapsed = time.time() - started
            else:
                elapsed = ended - started
            hits = counts.get('add_hit', 0)
            if hits == 0:
                rps = 0
            else:
                rps = hits / elapsed

            def _stamp2time(stamp):
                # Accept either a datetime or a raw epoch stamp.
                if not isinstance(stamp, datetime):
                    stamp = datetime.fromtimestamp(int(stamp))
                return stamp.strftime('%Y-%m-%d %H:%M:%S UTC')

            started = datetime.fromtimestamp(int(started))
            metadata['started'] = _stamp2time(started)
            counts['rps'] = int(rps)
            counts['elapsed'] = seconds_to_time(elapsed)
            ended = started + timedelta(seconds=elapsed)
            counts['finished'] = finished(ended)
            metadata['ended'] = _stamp2time(ended)
            # A run succeeded only when it produced no errors and no failures.
            counts['success'] = (counts.get('addError', 0) == 0 and
                                 counts.get('addFailure', 0) == 0)
            metadata['style'] = counts['success'] and 'green' or 'red'
        else:
            # No start stamp recorded: report placeholders.
            metadata['started'] = metadata['ended'] = 'N/A'
            counts['rps'] = 0
            counts['elapsed'] = 0
            counts['finished'] = 'N/A'
            counts['success'] = False
            metadata['style'] = 'red'
        if metadata.get('active', False):
            metadata['active_label'] = 'Running'
        else:
            metadata['active_label'] = 'Ended'
        result['counts'] = counts
        result['metadata'] = metadata
        return result
class Controller(object):
    """Dashboard-side controller configured from Django-style settings.

    Connects to the results database and the broker using
    ``settings.LOADS_DATABASE`` / ``settings.LOADS_BROKER`` and exposes
    run listing, stopping and aggregated reporting.
    """

    def __init__(self):
        self.db = get_database(
            settings.LOADS_DATABASE['backend'],
            **settings.LOADS_DATABASE['options']
        )
        self.client = Client(settings.LOADS_BROKER)

    def stop(self, run_id):
        """Stop the given run on the broker."""
        self.client.stop_run(run_id)
        self.get_broker_info()

    def ping_db(self):
        return self.db.ping()

    def get_broker_info(self):
        return self.client.ping()

    def get_runs(self, **filters):
        """Return run ids, optionally filtered by metadata equality.

        With no filters the raw database listing is returned; otherwise a
        run is included once as soon as any filter key matches.
        """
        if not filters:
            return self.db.get_runs()
        runs = []
        for run in self.db.get_runs():
            info = self.get_run_info(run)
            for key, value in filters.items():
                if key not in info['metadata']:
                    continue
                if info['metadata'][key] == value:
                    # Fix: stop at the first matching filter, otherwise a
                    # run matching several filters was appended repeatedly.
                    runs.append(run)
                    break
        return runs

    def get_run_info(self, run_id, data=True):
        """Aggregate data, errors, counters and metadata for one run.

        Returns a dict with ``data`` (when requested), ``errors``
        (deduplicated by traceback hash), ``custom`` (non-standard
        counters), ``counts`` and ``metadata``.
        """
        result = {}
        if data:
            data = self.db.get_data(run_id, size=100)
            # Make it easier to serialize so a list is better than a generator
            result['data'] = list(data)
        # Deduplicate errors by the md5 of the error text, keeping a count
        # and one representative traceback per distinct error.
        # NOTE(review): md5() requires bytes on Python 3 — assumes
        # `error` is bytes coming out of the DB; verify against the store.
        errors = {}
        lines = self.db.get_data(run_id, data_type='addError', size=100)
        for line in lines:
            error, tb, tb2 = line['exc_info']
            hashed = md5(error).hexdigest()
            if hashed in errors:
                old_count, tb = errors[hashed]
                errors[hashed] = old_count + 1, tb
            else:
                errors[hashed] = 1, tb + '\n' + tb2
        # Py3 fix: dict.items() is a view with no .sort(); sorted() works
        # on both Python 2 and 3 and yields the same ordering.
        errors = sorted(errors.items())
        result['errors'] = errors
        counts = self.db.get_counts(run_id)
        # Split counters the dashboard knows about (_COUNTS) from
        # test-defined custom counters.
        custom = {}
        for key, value in list(counts.items()):
            if key in _COUNTS:
                continue
            custom[key] = value
            del counts[key]
        result['custom'] = custom
        metadata = self.db.get_metadata(run_id)
        started = metadata.get('started')
        ended = metadata.get('ended', time.time())
        active = metadata.get('active', False)
        # approximate — should be set by the broker
        if started is not None:
            if active:
                elapsed = time.time() - started
            else:
                elapsed = ended - started
            hits = counts.get('add_hit', 0)
            if hits == 0:
                rps = 0
            else:
                rps = hits / elapsed
            #TODO: Move to template
            started = datetime.fromtimestamp(int(started))
            metadata['started'] = started.strftime('%Y-%m-%d %H:%M:%S')
            counts['rps'] = int(rps)
            counts['elapsed'] = naturaltime(elapsed)
            ended = started + timedelta(seconds=elapsed)
            counts['finished'] = naturaltime(ended)
            # Fix: a run with failures (but no errors) was reported as a
            # success; count both, matching the other Controller variant.
            counts['success'] = (counts.get('addError', 0) == 0 and
                                 counts.get('addFailure', 0) == 0)
            metadata['style'] = counts['success'] and 'green' or 'red'
        else:
            # No start stamp recorded: report placeholders.
            metadata['started'] = 'N/A'
            counts['rps'] = 0
            counts['elapsed'] = 0
            counts['finished'] = 'N/A'
            counts['success'] = False
            metadata['style'] = 'red'
        if metadata.get('active', False):
            metadata['active_label'] = 'Running'
        else:
            metadata['active_label'] = 'Ended'
        result['counts'] = counts
        result['metadata'] = metadata
        return result
def setUp(self):
    """Bring the test servers up and attach a fresh client per test."""
    # The servers must be running before the Client connects.
    start_servers()
    self.client = Client()