def reanalyze_results(project_name, project_path, results_dir):
    config_path = os.path.join(results_dir, 'config.cfg')
    (run_time, rampup, console_logging, results_ts_interval,
     user_group_configs, results_database, post_run_script) = configure(
        project_name, project_path, config_path)

    # Get the top-level directory name for our results dir
    if results_dir[-1] == os.path.sep:
        results_dir = results_dir[:-1]
    _, reran_project_time = os.path.split(results_dir)

    output_dir = os.path.join(project_path, 'results', reran_project_time)
    logger.debug("Storing reprocessed results in: %s", output_dir)

    logger.info('Re-analyzing results...\n')
    results.output_results(
        output_dir,
        os.path.join(output_dir, 'results.csv'),
        run_time,
        rampup,
        results_ts_interval,
        user_group_configs,
        template_dirs=get_mm_templates_dirs(project_path),
    )
    logger.info('created: %s', os.path.join(output_dir, 'results.html'))
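
# Usage sketch for reanalyze_results (illustrative only; the project name and
# paths below are hypothetical). It assumes the results directory still holds
# the config.cfg and results.csv saved by the original run, and rebuilds the
# results.html report from them without re-running the load test:
#
#   reanalyze_results(
#       'my_project',
#       '/home/user/projects/my_project',
#       '/home/user/projects/my_project/results/results_2012.01.01_00.00.00')
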
def run_test(project_name, project_path, remote_starter=None):
    if remote_starter is not None:
        remote_starter.test_running = True
        remote_starter.output_dir = None

    config_path = os.path.join(project_path, 'config.cfg')
    (run_time, rampup, console_logging, results_ts_interval,
     user_group_configs, results_database, post_run_script) = configure(
        project_name, project_path, config_path)

    run_localtime = time.localtime()
    time_str = time.strftime('%Y.%m.%d_%H.%M.%S', run_localtime)
    output_dir = os.path.join(project_path, 'results', 'results_%s' % time_str)
    logger.debug("Test output directory: %s", output_dir)

    # this queue is shared between all processes/threads
    queue = multiprocessing.Queue()
    rw = ResultsWriter(queue, output_dir, console_logging)
    rw.daemon = True
    rw.start()

    user_groups = []
    for i, ug_config in enumerate(user_group_configs):
        ug = UserGroup(queue, i, ug_config.name, ug_config.num_threads,
                       test_scripts[ug_config.script_file], run_time, rampup)
        user_groups.append(ug)
    for user_group in user_groups:
        user_group.start()
    start_time = time.time()

    if console_logging:
        for user_group in user_groups:
            user_group.join()
    else:
        # sum per-group thread counts; groups may be configured with different
        # sizes, so multiplying one group's size by the group count is wrong
        total_threads = sum(cfg.num_threads for cfg in user_group_configs)
        print '\n user_groups: %i' % len(user_groups)
        print ' threads: %i\n' % total_threads

        p = progressbar.ProgressBar(run_time)
        elapsed = 0
        while elapsed < (run_time + 1):
            p.update_time(elapsed)
            if sys.platform.startswith('win'):
                # trailing comma suppresses the newline; '\r' rewinds the line
                print '%s transactions: %i timers: %i errors: %i\r' % (
                    p, rw.trans_count, rw.timer_count, rw.error_count),
            else:
                print '%s transactions: %i timers: %i errors: %i' % (
                    p, rw.trans_count, rw.timer_count, rw.error_count)
                sys.stdout.write(chr(27) + '[A')  # ANSI escape: cursor up one line
            time.sleep(1)
            elapsed = time.time() - start_time

        print p

        # both platform branches logged the same message, so one call suffices
        while any(user_group.is_alive() for user_group in user_groups):
            logger.info('waiting for all requests to finish...\r')
            time.sleep(.5)

        if not sys.platform.startswith('win'):
            print

    # all agents are done running at this point
    time.sleep(.2)  # make sure the writer queue is flushed
    logger.info('analyzing results...\n')
    results.output_results(
        output_dir,
        os.path.join(output_dir, 'results.csv'),
        run_time,
        rampup,
        results_ts_interval,
        user_group_configs,
        template_dirs=get_mm_templates_dirs(project_path),
    )
    logger.info('created: %s', os.path.join(output_dir, 'results.html'))

    # copy config file to results directory
    project_config = os.path.join(project_path, 'config.cfg')
    saved_config = os.path.join(output_dir, 'config.cfg')
    shutil.copy(project_config, saved_config)

    if results_database is not None:
        logger.info('loading results into database: %s\n', results_database)
        import multi_mechanize.resultsloader
        multi_mechanize.resultsloader.load_results_database(
            project_name, run_localtime, output_dir, results_database,
            run_time, rampup, results_ts_interval, user_group_configs)

    if post_run_script is not None:
        logger.info('running post_run_script: %s\n', post_run_script)
        subprocess.call(post_run_script)

    logger.info('done.')

    if remote_starter is not None:
        remote_starter.test_running = False
        remote_starter.output_dir = output_dir

    return
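
# Usage sketch for run_test (illustrative only; the project layout below is
# hypothetical). It assumes a standard multi-mechanize project: a config.cfg
# under project_path, with the scripts named in its user group sections
# already loaded into the module-level `test_scripts` mapping:
#
#   run_test('my_project', '/home/user/projects/my_project')
#
# When a remote_starter object is supplied, a controlling process can poll
# its test_running flag and read output_dir once the run completes.
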