def main():
  """Fires tasks at a Swarming server at a constant rate and prints latency
  statistics once results come back.
  """
  colorama.init()
  parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
  parser.add_option(
      '-S', '--swarming', metavar='URL', default='',
      help='Swarming server to use')
  swarming.add_filter_options(parser)
  parser.set_defaults(dimensions=[('os', swarming_load_test_bot.OS_NAME)])

  group = optparse.OptionGroup(parser, 'Load generated')
  group.add_option(
      '-s', '--send-rate', type='float', default=16., metavar='RATE',
      help='Rate (item/s) of sending requests as a float, default: %default')
  group.add_option(
      '-D', '--duration', type='float', default=60., metavar='N',
      help='Duration (s) of the sending phase of the load test, '
           'default: %default')
  group.add_option(
      '-m', '--concurrent', type='int', default=200, metavar='N',
      help='Maximum concurrent on-going requests, default: %default')
  group.add_option(
      '-t', '--timeout', type='float', default=3600., metavar='N',
      help='Timeout to get results, default: %default')
  parser.add_option_group(group)

  group = optparse.OptionGroup(parser, 'Display options')
  group.add_option(
      '--columns', type='int', default=graph.get_console_width(), metavar='N',
      help='For histogram display, default:%default')
  group.add_option(
      '--buckets', type='int', default=20, metavar='N',
      help='Number of buckets for histogram display, default:%default')
  parser.add_option_group(group)

  parser.add_option('--dump', metavar='FOO.JSON', help='Dumps to json file')
  parser.add_option(
      '-v', '--verbose', action='store_true', help='Enables logging')
  options, args = parser.parse_args()

  logging.basicConfig(level=logging.INFO if options.verbose else logging.FATAL)
  if args:
    parser.error('Unsupported args: %s' % args)
  options.swarming = options.swarming.rstrip('/')
  if not options.swarming:
    parser.error('--swarming is required.')
  if options.duration <= 0:
    parser.error('Needs --duration > 0. 0.01 is a valid value.')
  swarming.process_filter_options(parser, options)

  total = options.send_rate * options.duration
  print(
      'Sending %.1f i/s for %ds with max %d parallel requests; timeout %.1fs; '
      'total %d' % (
          options.send_rate, options.duration, options.concurrent,
          options.timeout, total))
  print('[processing/processed/todo]')

  # Random id so concurrent runs don't clash with each other or real usage.
  unique = ''.join(random.choice(string.ascii_letters) for _ in range(8))
  progress = threading_utils.Progress(
      [('processing', 0), ('processed', 0), ('todo', 0)])
  triggered = 0
  with threading_utils.ThreadPoolWithProgress(
      progress, 1, options.concurrent, 0) as pool:
    try:
      start = time.time()
      while True:
        elapsed = time.time() - start
        if elapsed > options.duration:
          break
        # Trigger whatever is needed to catch up with the target send rate.
        expected = int(elapsed * options.send_rate)
        while triggered < expected:
          pool.add_task(
              0, trigger_task, options.swarming, options.dimensions,
              progress, unique, options.timeout, triggered)
          progress.update_item('', todo=1)
          triggered += 1
        progress.print_update()
        time.sleep(0.01)
    except KeyboardInterrupt:
      unsent = pool.abort()
      progress.update_item(
          'Got Ctrl-C. Aborted %d unsent tasks.' % unsent,
          raw=True, todo=-unsent)
      progress.print_update()
    finally:
      # TODO(maruel): We could give up on collecting results for the on-going
      # tasks but that would need to be optional.
      progress.update_item('Getting results for on-going tasks.', raw=True)
      results = sorted(pool.join())
  progress.print_update()
  # At this point, progress is not used anymore.
  print('')
  print(' - Took %.1fs.' % (time.time() - start))
  print('')
  print_results(results, options.columns, options.buckets)
  if options.dump:
    with open(options.dump, 'w') as f:
      json.dump(results, f, separators=(',', ':'))
  return 0
def main():
  """Sends tasks to a Swarming server at a fixed rate, incrementally dumping
  partial results so data survives hung tasks.
  """
  colorama.init()
  parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
  parser.add_option(
      '-S', '--swarming', metavar='URL', default='',
      help='Swarming server to use')
  swarming.add_filter_options(parser)
  parser.set_defaults(dimensions=[('os', swarming_load_test_bot.OS_NAME)])

  group = optparse.OptionGroup(parser, 'Load generated')
  group.add_option(
      '-s', '--send-rate', type='float', default=16., metavar='RATE',
      help='Rate (item/s) of sending requests as a float, default: %default')
  group.add_option(
      '-D', '--duration', type='float', default=60., metavar='N',
      help='Duration (s) of the sending phase of the load test, '
           'default: %default')
  group.add_option(
      '-m', '--concurrent', type='int', default=200, metavar='N',
      help='Maximum concurrent on-going requests, default: %default')
  group.add_option(
      '-t', '--timeout', type='float', default=15 * 60., metavar='N',
      help='Task expiration and timeout to get results, the task itself will '
           'have %ds less than the value provided. Default: %%default' %
           TIMEOUT_OVERHEAD)
  group.add_option(
      '-o', '--output-size', type='int', default=100, metavar='N',
      help='Bytes sent to stdout, default: %default')
  group.add_option(
      '--sleep', type='int', default=60, metavar='N',
      help='Amount of time the bot should sleep, e.g. faking work, '
           'default: %default')
  parser.add_option_group(group)

  group = optparse.OptionGroup(parser, 'Display options')
  group.add_option(
      '--columns', type='int', default=graph.get_console_width(), metavar='N',
      help='For histogram display, default:%default')
  group.add_option(
      '--buckets', type='int', default=20, metavar='N',
      help='Number of buckets for histogram display, default:%default')
  parser.add_option_group(group)

  parser.add_option('--dump', metavar='FOO.JSON', help='Dumps to json file')
  parser.add_option(
      '-v', '--verbose', action='store_true', help='Enables logging')
  options, args = parser.parse_args()

  logging.basicConfig(
      level=logging.INFO if options.verbose else logging.FATAL)
  if args:
    parser.error('Unsupported args: %s' % args)
  options.swarming = options.swarming.rstrip('/')
  if not options.swarming:
    parser.error('--swarming is required.')
  if options.duration <= 0:
    parser.error('Needs --duration > 0. 0.01 is a valid value.')
  swarming.process_filter_options(parser, options)

  total = int(round(options.send_rate * options.duration))
  print(
      'Sending %.1f i/s for %ds with max %d parallel requests; timeout %.1fs; '
      'total %d' % (
          options.send_rate, options.duration, options.concurrent,
          options.timeout, total))
  print('[processing/processed/todo]')

  # This is used so there's no clash between runs and actual real usage.
  unique = ''.join(random.choice(string.ascii_letters) for _ in range(8))
  progress = threading_utils.Progress(
      [('processing', 0), ('processed', 0), ('todo', 0)])
  sent = 0
  results = []
  with threading_utils.ThreadPoolWithProgress(
      progress, 1, options.concurrent, 0) as pool:
    try:
      start = time.time()
      while True:
        elapsed = time.time() - start
        if elapsed > options.duration:
          break
        # Trigger however many tasks are needed to catch up with the rate.
        expected = int(round(elapsed * options.send_rate))
        while sent < expected:
          pool.add_task(
              0, trigger_task, options.swarming, options.dimensions,
              options.sleep, options.output_size, progress, unique,
              options.timeout, sent)
          progress.update_item('', todo=1)
          sent += 1
        progress.print_update()
        time.sleep(0.01)

      progress.update_item('Getting results for on-going tasks.', raw=True)
      for result in pool.iter_results():
        results.append(result)
        # This is a bit excessive but it's useful in the case where some tasks
        # hangs, so at least partial data is available.
        if options.dump:
          results.sort()
          if os.path.exists(options.dump):
            os.rename(options.dump, options.dump + '.old')
          with open(options.dump, 'wb') as out:
            json.dump(results, out, separators=(',', ':'))
      if not options.dump:
        results.sort()
    except KeyboardInterrupt:
      unsent = pool.abort()
      progress.update_item(
          'Got Ctrl-C. Aborted %d unsent tasks.' % unsent,
          raw=True, todo=-unsent)
      progress.print_update()
  progress.print_update()
  # At this point, progress is not used anymore.
  print('')
  print(' - Took %.1fs.' % (time.time() - start))
  print('')
  print_results(results, options.columns, options.buckets)
  return 0
def main():
  """Runs a fleet of fake Swarming bots against a server until Ctrl-C, then
  prints statistics about the tasks they consumed.
  """
  colorama.init()
  parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
  parser.add_option(
      '-S', '--swarming', metavar='URL', default='',
      help='Swarming server to use')
  parser.add_option(
      '--suffix', metavar='NAME', default='', help='Bot suffix name to use')
  swarming.add_filter_options(parser)
  # Use improbable values to reduce the chance of interferring with real
  # slaves.
  parser.set_defaults(
      dimensions=[
        ('cpu', ['arm36']),
        ('hostname', socket.getfqdn()),
        ('os', OS_NAME),
      ])

  group = optparse.OptionGroup(parser, 'Load generated')
  group.add_option(
      '--slaves', type='int', default=300, metavar='N',
      help='Number of swarm bot slaves, default: %default')
  group.add_option(
      '-c', '--consume', type='float', default=60., metavar='N',
      help='Duration (s) for consuming a request, default: %default')
  parser.add_option_group(group)

  group = optparse.OptionGroup(parser, 'Display options')
  group.add_option(
      '--columns', type='int', default=graph.get_console_width(), metavar='N',
      help='For histogram display, default:%default')
  group.add_option(
      '--buckets', type='int', default=20, metavar='N',
      help='Number of buckets for histogram display, default:%default')
  parser.add_option_group(group)

  parser.add_option('--dump', metavar='FOO.JSON', help='Dumps to json file')
  parser.add_option(
      '-v', '--verbose', action='store_true', help='Enables logging')
  options, args = parser.parse_args()

  logging.basicConfig(level=logging.INFO if options.verbose else logging.FATAL)
  if args:
    parser.error('Unsupported args: %s' % args)
  options.swarming = options.swarming.rstrip('/')
  if not options.swarming:
    parser.error('--swarming is required.')
  if options.consume <= 0:
    parser.error('Needs --consume > 0. 0.01 is a valid value.')
  swarming.process_filter_options(parser, options)

  print(
      'Running %d slaves, each task lasting %.1fs' % (
          options.slaves, options.consume))
  print('Ctrl-C to exit.')
  print('[processing/processed/bots]')
  progress = threading_utils.Progress(
      [('processing', 0), ('processed', 0), ('bots', 0)])
  events = Queue.Queue()
  start = time.time()
  kill_event = threading.Event()
  swarm_bot_version_hash = calculate_version(options.swarming + '/bot_code')
  hostname = get_hostname()
  if options.suffix:
    hostname += '-' + options.suffix
  bots = [
    FakeSwarmBot(
        options.swarming, options.dimensions, swarm_bot_version_hash,
        hostname, bot_index, progress, options.consume, events, kill_event)
    for bot_index in range(options.slaves)
  ]
  try:
    # Wait for all the slaves to come alive.
    while not all(bot.is_alive() for bot in bots):
      time.sleep(0.01)
    progress.update_item('Ready to run')
    while bots:
      progress.print_update()
      time.sleep(0.01)
      # The slaves could be told to die.
      bots = [bot for bot in bots if bot.is_alive()]
  except KeyboardInterrupt:
    kill_event.set()

  progress.update_item('Waiting for slaves to quit.', raw=True)
  progress.update_item('')
  while bots:
    progress.print_update()
    bots = [bot for bot in bots if bot.is_alive()]
  # At this point, progress is not used anymore.
  print('')
  print('Ran for %.1fs.' % (time.time() - start))
  print('')
  results = list(events.queue)
  print_results(results, options.columns, options.buckets)
  if options.dump:
    with open(options.dump, 'w') as out:
      json.dump(results, out, separators=(',', ':'))
  return 0
def main():
  """Runs a fleet of fake Swarming bot slaves until Ctrl-C and prints
  statistics about the tasks they consumed.

  Returns:
    0 as the process exit code.
  """
  colorama.init()
  parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
  parser.add_option(
      '-S', '--swarming', metavar='URL', default='',
      help='Swarming server to use')
  swarming.add_filter_options(parser)
  # Use improbable values to reduce the chance of interferring with real
  # slaves.
  parser.set_defaults(
      dimensions=[
        ('bits', '36'),
        ('machine', os.uname()[4] + '-experimental'),
        ('os', OS_NAME),
      ])

  group = optparse.OptionGroup(parser, 'Load generated')
  group.add_option(
      '--slaves', type='int', default=300, metavar='N',
      help='Number of swarm bot slaves, default: %default')
  group.add_option(
      '-c', '--consume', type='float', default=60., metavar='N',
      help='Duration (s) for consuming a request, default: %default')
  parser.add_option_group(group)

  group = optparse.OptionGroup(parser, 'Display options')
  group.add_option(
      '--columns', type='int', default=graph.get_console_width(), metavar='N',
      help='For histogram display, default:%default')
  group.add_option(
      '--buckets', type='int', default=20, metavar='N',
      help='Number of buckets for histogram display, default:%default')
  parser.add_option_group(group)

  parser.add_option('--dump', metavar='FOO.JSON', help='Dumps to json file')
  parser.add_option(
      '-v', '--verbose', action='store_true', help='Enables logging')
  options, args = parser.parse_args()

  logging.basicConfig(level=logging.INFO if options.verbose else logging.FATAL)
  if args:
    parser.error('Unsupported args: %s' % args)
  options.swarming = options.swarming.rstrip('/')
  if not options.swarming:
    parser.error('--swarming is required.')
  if options.consume <= 0:
    parser.error('Needs --consume > 0. 0.01 is a valid value.')
  swarming.process_filter_options(parser, options)

  print(
      'Running %d slaves, each task lasting %.1fs' % (
          options.slaves, options.consume))
  print('Ctrl-C to exit.')
  print('[processing/processed/bots]')
  columns = [('processing', 0), ('processed', 0), ('bots', 0)]
  progress = threading_utils.Progress(columns)
  # Each fake bot pushes its per-task events here.
  events = Queue.Queue()
  start = time.time()
  kill_event = threading.Event()
  swarm_bot_version_hash = calculate_version(
      options.swarming + '/get_slave_code')
  slaves = [
    FakeSwarmBot(
        options.swarming, options.dimensions, swarm_bot_version_hash, i,
        progress, options.consume, events, kill_event)
    for i in range(options.slaves)
  ]
  try:
    # Wait for all the slaves to come alive.
    while not all(s.is_alive() for s in slaves):
      time.sleep(0.01)
    progress.update_item('Ready to run')
    while slaves:
      progress.print_update()
      time.sleep(0.01)
      # The slaves could be told to die.
      slaves = [s for s in slaves if s.is_alive()]
  except KeyboardInterrupt:
    kill_event.set()

  progress.update_item('Waiting for slaves to quit.', raw=True)
  progress.update_item('')
  while slaves:
    progress.print_update()
    slaves = [s for s in slaves if s.is_alive()]
  # At this point, progress is not used anymore.
  print('')
  print('Ran for %.1fs.' % (time.time() - start))
  print('')
  # BUGFIX: snapshot the queue contents into a plain list. Queue.queue is a
  # collections.deque, which json.dump() cannot serialize, so --dump used to
  # raise TypeError. A list copy also decouples the results from the queue.
  results = list(events.queue)
  print_results(results, options.columns, options.buckets)
  if options.dump:
    with open(options.dump, 'w') as f:
      json.dump(results, f, separators=(',', ':'))
  return 0
def main():
  """Drives a constant-rate task-triggering load test against a Swarming
  server, dumping partial results as each task completes.
  """
  colorama.init()
  parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
  parser.add_option(
      '-S', '--swarming', metavar='URL', default='',
      help='Swarming server to use')
  swarming.add_filter_options(parser)
  parser.set_defaults(dimensions=[('os', swarming_load_test_bot.OS_NAME)])

  load_group = optparse.OptionGroup(parser, 'Load generated')
  load_group.add_option(
      '-s', '--send-rate', type='float', default=16.0, metavar='RATE',
      help='Rate (item/s) of sending requests as a float, default: %default')
  load_group.add_option(
      '-D', '--duration', type='float', default=60.0, metavar='N',
      help='Duration (s) of the sending phase of the load test, '
           'default: %default')
  load_group.add_option(
      '-m', '--concurrent', type='int', default=200, metavar='N',
      help='Maximum concurrent on-going requests, default: %default')
  load_group.add_option(
      '-t', '--timeout', type='float', default=15 * 60.0, metavar='N',
      help='Task expiration and timeout to get results, the task itself will '
           'have %ds less than the value provided. Default: %%default' %
           TIMEOUT_OVERHEAD)
  load_group.add_option(
      '-o', '--output-size', type='int', default=100, metavar='N',
      help='Bytes sent to stdout, default: %default')
  load_group.add_option(
      '--sleep', type='int', default=60, metavar='N',
      help='Amount of time the bot should sleep, e.g. faking work, '
           'default: %default')
  parser.add_option_group(load_group)

  display_group = optparse.OptionGroup(parser, 'Display options')
  display_group.add_option(
      '--columns', type='int', default=graph.get_console_width(), metavar='N',
      help='For histogram display, default:%default')
  display_group.add_option(
      '--buckets', type='int', default=20, metavar='N',
      help='Number of buckets for histogram display, default:%default')
  parser.add_option_group(display_group)

  parser.add_option('--dump', metavar='FOO.JSON', help='Dumps to json file')
  parser.add_option(
      '-v', '--verbose', action='store_true', help='Enables logging')
  options, args = parser.parse_args()

  logging.basicConfig(level=logging.INFO if options.verbose else logging.FATAL)
  if args:
    parser.error('Unsupported args: %s' % args)
  options.swarming = options.swarming.rstrip('/')
  if not options.swarming:
    parser.error('--swarming is required.')
  if options.duration <= 0:
    parser.error('Needs --duration > 0. 0.01 is a valid value.')
  swarming.process_filter_options(parser, options)

  total = int(round(options.send_rate * options.duration))
  print(
      'Sending %.1f i/s for %ds with max %d parallel requests; timeout %.1fs; '
      'total %d' % (
          options.send_rate, options.duration, options.concurrent,
          options.timeout, total))
  print('[processing/processed/todo]')

  # This is used so there's no clash between runs and actual real usage.
  unique = ''.join(random.choice(string.ascii_letters) for _ in range(8))
  progress = threading_utils.Progress(
      [('processing', 0), ('processed', 0), ('todo', 0)])
  triggered = 0
  results = []
  with threading_utils.ThreadPoolWithProgress(
      progress, 1, options.concurrent, 0) as pool:
    try:
      start = time.time()
      while True:
        elapsed = time.time() - start
        if elapsed > options.duration:
          break
        # Send enough tasks to stay on the requested rate.
        target = int(round(elapsed * options.send_rate))
        while triggered < target:
          pool.add_task(
              0, trigger_task, options.swarming, options.dimensions,
              options.sleep, options.output_size, progress, unique,
              options.timeout, triggered)
          progress.update_item('', todo=1)
          triggered += 1
        progress.print_update()
        time.sleep(0.01)

      progress.update_item('Getting results for on-going tasks.', raw=True)
      for item in pool.iter_results():
        results.append(item)
        # This is a bit excessive but it's useful in the case where some tasks
        # hangs, so at least partial data is available.
        if options.dump:
          results.sort()
          if os.path.exists(options.dump):
            os.rename(options.dump, options.dump + '.old')
          with open(options.dump, 'wb') as out:
            json.dump(results, out, separators=(',', ':'))
      if not options.dump:
        results.sort()
    except KeyboardInterrupt:
      unsent = pool.abort()
      progress.update_item(
          'Got Ctrl-C. Aborted %d unsent tasks.' % unsent,
          raw=True, todo=-unsent)
      progress.print_update()
  progress.print_update()
  # At this point, progress is not used anymore.
  print('')
  print(' - Took %.1fs.' % (time.time() - start))
  print('')
  print_results(results, options.columns, options.buckets)
  return 0
def main():
  """Load-tests a Swarming server by triggering tasks at a steady rate, then
  collects and summarizes the results.
  """
  colorama.init()
  parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
  parser.add_option(
      '-S', '--swarming', metavar='URL', default='',
      help='Swarming server to use')
  swarming.add_filter_options(parser)
  parser.set_defaults(dimensions=[('os', swarming_load_test_bot.OS_NAME)])

  group = optparse.OptionGroup(parser, 'Load generated')
  group.add_option(
      '-s', '--send-rate', type='float', default=16., metavar='RATE',
      help='Rate (item/s) of sending requests as a float, default: %default')
  group.add_option(
      '-D', '--duration', type='float', default=60., metavar='N',
      help='Duration (s) of the sending phase of the load test, '
           'default: %default')
  group.add_option(
      '-m', '--concurrent', type='int', default=200, metavar='N',
      help='Maximum concurrent on-going requests, default: %default')
  group.add_option(
      '-t', '--timeout', type='float', default=3600., metavar='N',
      help='Timeout to get results, default: %default')
  parser.add_option_group(group)

  group = optparse.OptionGroup(parser, 'Display options')
  group.add_option(
      '--columns', type='int', default=graph.get_console_width(), metavar='N',
      help='For histogram display, default:%default')
  group.add_option(
      '--buckets', type='int', default=20, metavar='N',
      help='Number of buckets for histogram display, default:%default')
  parser.add_option_group(group)

  parser.add_option('--dump', metavar='FOO.JSON', help='Dumps to json file')
  parser.add_option(
      '-v', '--verbose', action='store_true', help='Enables logging')
  options, args = parser.parse_args()

  logging.basicConfig(
      level=logging.INFO if options.verbose else logging.FATAL)
  if args:
    parser.error('Unsupported args: %s' % args)
  options.swarming = options.swarming.rstrip('/')
  if not options.swarming:
    parser.error('--swarming is required.')
  if options.duration <= 0:
    parser.error('Needs --duration > 0. 0.01 is a valid value.')
  swarming.process_filter_options(parser, options)

  total = options.send_rate * options.duration
  print(
      'Sending %.1f i/s for %ds with max %d parallel requests; timeout %.1fs; '
      'total %d' % (
          options.send_rate, options.duration, options.concurrent,
          options.timeout, total))
  print('[processing/processed/todo]')

  # This is used so there's no clash between runs and actual real usage.
  unique = ''.join(random.choice(string.ascii_letters) for _ in range(8))
  columns = [('processing', 0), ('processed', 0), ('todo', 0)]
  progress = threading_utils.Progress(columns)
  n_sent = 0
  with threading_utils.ThreadPoolWithProgress(
      progress, 1, options.concurrent, 0) as pool:
    try:
      start = time.time()
      while True:
        elapsed = time.time() - start
        if elapsed > options.duration:
          break
        # Keep the number of triggered tasks on pace with the send rate.
        on_pace = int(elapsed * options.send_rate)
        while n_sent < on_pace:
          pool.add_task(
              0, trigger_task, options.swarming, options.dimensions,
              progress, unique, options.timeout, n_sent)
          progress.update_item('', todo=1)
          n_sent += 1
        progress.print_update()
        time.sleep(0.01)
    except KeyboardInterrupt:
      n_aborted = pool.abort()
      progress.update_item(
          'Got Ctrl-C. Aborted %d unsent tasks.' % n_aborted,
          raw=True, todo=-n_aborted)
      progress.print_update()
    finally:
      # TODO(maruel): We could give up on collecting results for the on-going
      # tasks but that would need to be optional.
      progress.update_item('Getting results for on-going tasks.', raw=True)
      results = sorted(pool.join())
  progress.print_update()
  # At this point, progress is not used anymore.
  print('')
  print(' - Took %.1fs.' % (time.time() - start))
  print('')
  print_results(results, options.columns, options.buckets)
  if options.dump:
    with open(options.dump, 'w') as out:
      json.dump(results, out, separators=(',', ':'))
  return 0