Ejemplo n.º 1
0
def rerun_results(results_dir):
    output_dir = 'projects/%s/results/%s/' % (project_name, results_dir)
    saved_config = '%s/config.cfg' % output_dir
    run_time, rampup, console_logging, results_ts_interval, user_group_configs, results_database, post_run_script = configure(project_name, config_file=saved_config)
    print '\n\nanalyzing results...\n'
    results.output_results(output_dir, 'results.csv', run_time, rampup, results_ts_interval, user_group_configs)
    print 'created: %sresults.html\n' % output_dir
Ejemplo n.º 2
0
def rerun_results(results_dir):
    output_dir = 'projects/%s/results/%s/' % (project_name, results_dir)
    saved_config = '%s/config.cfg' % output_dir
    run_time, rampup, results_ts_interval, console_logging, progress_bar, results_database, post_run_script, xml_report, user_group_configs = configure(project_name, config_file=saved_config)
    print '\n\nanalyzing results...\n'
    results.output_results(output_dir, 'results.csv', run_time, rampup, results_ts_interval, user_group_configs, xml_report)
    print 'created: %sresults.html\n' % output_dir
    if xml_report:
        print 'created: %sresults.jtl' % output_dir
        print 'created: last_results.jtl\n'
Ejemplo n.º 3
0
def run_test():
    """Execute one load test run and write results to a timestamped directory.

    NOTE(review): relies on module-level globals (ugname, num_threads,
    run_time, rampup, results_ts_interval, user_group_configs) -- confirm
    they are set before this is called.
    """
    # results go into a directory named after the test start time
    run_localtime = time.localtime()
    output_dir = time.strftime('results/results_%Y.%m.%d_%H.%M.%S/', run_localtime)

    # this queue is shared between all processes/threads
    queue = multiprocessing.Queue()
    # ResultsWriter drains the queue in the background; daemon so it
    # cannot block interpreter exit
    rw = ResultsWriter(queue, output_dir)
    rw.daemon = True
    rw.start()

    # single user group running num_threads virtual users
    ug = UserGroup(queue, 1, ugname, num_threads, run_time, rampup)
    ug.start()

    start_time = time.time()
    #ug.join()

    print '  threads: %i\n' % (num_threads)
    # live console progress: redraw one status line once per second
    p = progressbar.ProgressBar(run_time)
    elapsed = 0
    while elapsed < (run_time + 1):
        p.update_time(elapsed)
        print '%s   transactions: %i  timers: %i  errors: %i' % (p, rw.trans_count, rw.timer_count, rw.error_count)
        # ANSI "cursor up" escape so the next print overwrites this line
        sys.stdout.write(chr(27) + '[A' )
        time.sleep(1)
        elapsed = time.time() - start_time
    print p

    # give in-flight requests a grace period, then force-kill the group
    # (roughly 60s: 20 iterations x 3s sleep before terminate is called)
    i = 0
    while ug.is_alive():
        print 'waiting for all requests to finish...'
        time.sleep(3)
        if i > 20:
            ug.terminate()
        i += 1

    # all agents are done running at this point
    time.sleep(.2) # make sure the writer queue is flushed
    print '\n\nanalyzing results...\n'
    results.output_results(output_dir, 'results.csv', run_time, rampup, results_ts_interval, user_group_configs)
    print 'created: %sresults.html\n' % output_dir
    print 'done.\n'

    return
Ejemplo n.º 4
0
def run_test(remote_starter=None):
    """Run a full load test for the configured project.

    Reads settings via configure(), spawns one UserGroup process per
    configured group, shows console progress (unless console_logging is
    enabled), then analyzes results, archives the config, and optionally
    loads results into a database and runs a post-run script.

    remote_starter: optional controller object; its test_running /
    output_dir attributes are updated around the run so a remote caller
    can track state.

    NOTE(review): depends on the module-level global project_name --
    confirm it is set before calling.
    """
    if remote_starter is not None:
        remote_starter.test_running = True
        remote_starter.output_dir = None

    (
        run_time,
        rampup,
        console_logging,
        results_ts_interval,
        user_group_configs,
        results_database,
        post_run_script,
        project_config_script,
    ) = configure(project_name)

    # results are written to a directory named after the test start time
    run_localtime = time.localtime()
    output_dir = time.strftime("projects/" + project_name + "/results/results_%Y.%m.%d_%H.%M.%S/", run_localtime)

    # get project configuration
    if project_config_script is not None:
        print "running project_config_script: %s\n" % project_config_script
        # in python 2.7, we can just use the check_output command instead of Popen
        process = subprocess.Popen([project_config_script], shell=True, stdout=subprocess.PIPE)
        project_config_data = process.communicate()[0]
    else:
        project_config_data = ""

    # this queue is shared between all processes/threads
    queue = multiprocessing.Queue()
    # background writer drains the queue; daemon so it never blocks exit
    rw = ResultsWriter(queue, output_dir, console_logging)
    rw.daemon = True
    rw.start()

    # create all user groups first, then start them together so ramp-up
    # timing is as close to simultaneous as possible
    user_groups = []
    for i, ug_config in enumerate(user_group_configs):
        ug = UserGroup(
            queue,
            i,
            ug_config.name,
            ug_config.num_threads,
            ug_config.script_file,
            ug_config.script_options,
            run_time,
            rampup,
        )

        user_groups.append(ug)
    for user_group in user_groups:
        user_group.start()

    start_time = time.time()

    if console_logging:
        # raw per-request logging mode: just wait for the groups to finish
        for user_group in user_groups:
            user_group.join()
    else:
        print "\n  user_groups:  %i" % len(user_groups)
        # NOTE(review): assumes every group has the same thread count as
        # the last one iterated (ug_config leaks from the loop above)
        print "  threads: %i\n" % (ug_config.num_threads * len(user_groups))
        # live console progress: redraw one status line once per second
        p = progressbar.ProgressBar(run_time)
        elapsed = 0
        while elapsed < (run_time + 1):
            p.update_time(elapsed)
            if sys.platform.startswith("win"):
                # windows console: trailing comma + \r redraws in place
                print "%s   transactions: %i  timers: %i  errors: %i\r" % (
                    p,
                    rw.trans_count,
                    rw.timer_count,
                    rw.error_count,
                ),
            else:
                print "%s   transactions: %i  timers: %i  errors: %i" % (
                    p,
                    rw.trans_count,
                    rw.timer_count,
                    rw.error_count,
                )
                # ANSI "cursor up" so the next print overwrites this line
                sys.stdout.write(chr(27) + "[A")
            time.sleep(1)
            elapsed = time.time() - start_time

        print p

        # poll until every user group process has exited
        while [user_group for user_group in user_groups if user_group.is_alive()] != []:
            if sys.platform.startswith("win"):
                print "waiting for all requests to finish...\r",
            else:
                print "waiting for all requests to finish...\r"
                sys.stdout.write(chr(27) + "[A")
            time.sleep(0.5)

        if not sys.platform.startswith("win"):
            print

    # all agents are done running at this point
    time.sleep(0.2)  # make sure the writer queue is flushed
    print "\n\nanalyzing results...\n"
    results.output_results(
        output_dir, "results.csv", run_time, rampup, results_ts_interval, user_group_configs, project_config_data
    )
    print "created: %sresults.html\n" % output_dir

    # copy config file to results directory
    project_config = os.sep.join(["projects", project_name, "config.cfg"])
    saved_config = os.sep.join([output_dir, "config.cfg"])
    shutil.copy(project_config, saved_config)

    # archive the project_config_script output alongside the results
    with open(os.path.join(output_dir, "project_config_data.txt"), "w") as f:
        f.write(project_config_data)

    if results_database is not None:
        print "loading results into database: %s\n" % results_database
        # imported lazily so the DB dependency is only needed when used
        import lib.resultsloader

        lib.resultsloader.load_results_database(
            project_name,
            run_localtime,
            output_dir,
            results_database,
            run_time,
            rampup,
            results_ts_interval,
            user_group_configs,
        )

    if post_run_script is not None:
        print "running post_run_script: %s\n" % post_run_script
        subprocess.call(post_run_script)

    print "done.\n"

    if remote_starter is not None:
        # signal completion and hand the results location back to the caller
        remote_starter.test_running = False
        remote_starter.output_dir = output_dir

    return
Ejemplo n.º 5
0
def run_test(remote_starter=None):
    """Run a load test for the project, with an optional progress bar and
    an optional JMeter-style XML (.jtl) report.

    remote_starter: optional controller object; its test_running /
    output_dir attributes are updated around the run so a remote caller
    can track state.

    NOTE(review): depends on the module-level global project_name --
    confirm it is set before calling.
    """
    if remote_starter is not None:
        remote_starter.test_running = True
        remote_starter.output_dir = None
        
    run_time, rampup, results_ts_interval, console_logging, progress_bar, results_database, post_run_script, xml_report, user_group_configs = configure(project_name)
    
    # results directory named after the test start time
    run_localtime = time.localtime() 
    output_dir = time.strftime('projects/' + project_name + '/results/results_%Y.%m.%d_%H.%M.%S/', run_localtime) 
        
    # this queue is shared between all processes/threads
    queue = multiprocessing.Queue()
    # background writer drains the queue; daemon so it never blocks exit
    rw = ResultsWriter(queue, output_dir, console_logging)
    rw.daemon = True
    rw.start()
    
    # create all user groups first, then start them together
    user_groups = [] 
    for i, ug_config in enumerate(user_group_configs):
        ug = UserGroup(queue, i, ug_config.name, ug_config.num_threads, ug_config.script_file, run_time, rampup)
        user_groups.append(ug)    
    for user_group in user_groups:
        user_group.start()
        
    start_time = time.time() 
    
    if console_logging:
        # raw per-request logging mode: just wait for the groups to finish
        for user_group in user_groups:
            user_group.join()
    else:
        print '\n  user_groups:  %i' % len(user_groups)
        # NOTE(review): assumes every group has the same thread count as
        # the last one iterated (ug_config leaks from the loop above)
        print '  threads: %i\n' % (ug_config.num_threads * len(user_groups))
        
        if progress_bar:
            # redraw a one-line status every second until run_time elapses
            p = progressbar.ProgressBar(run_time)
            elapsed = 0
            while elapsed < (run_time + 1):
                p.update_time(elapsed)
                if sys.platform.startswith('win'):
                    # windows console: trailing comma + \r redraws in place
                    print '%s   transactions: %i  timers: %i  errors: %i\r' % (p, rw.trans_count, rw.timer_count, rw.error_count),
                else:
                    print '%s   transactions: %i  timers: %i  errors: %i' % (p, rw.trans_count, rw.timer_count, rw.error_count)
                    # ANSI "cursor up" so the next print overwrites this line
                    sys.stdout.write(chr(27) + '[A' )
                time.sleep(1)
                elapsed = time.time() - start_time
            
            print p
            
        # poll until every user group process has exited
        while [user_group for user_group in user_groups if user_group.is_alive()] != []:
            if progress_bar:
                if sys.platform.startswith('win'):
                    print 'waiting for all requests to finish...\r',
                else:
                    print 'waiting for all requests to finish...\r'
                    sys.stdout.write(chr(27) + '[A' )
            time.sleep(.5)
            
        if not sys.platform.startswith('win'):
            print

    # all agents are done running at this point
    time.sleep(.2) # make sure the writer queue is flushed
    print '\n\nanalyzing results...\n'
    results.output_results(output_dir, 'results.csv', run_time, rampup, results_ts_interval, user_group_configs, xml_report)
    print 'created: %sresults.html\n' % output_dir
    if xml_report:
        print 'created: %sresults.jtl' % output_dir
        print 'created: last_results.jtl\n'
    
    # copy config file to results directory
    project_config = os.sep.join(['projects', project_name, 'config.cfg'])
    saved_config = os.sep.join([output_dir, 'config.cfg'])
    shutil.copy(project_config, saved_config)
    
    if results_database is not None:
        print 'loading results into database: %s\n' % results_database
        # imported lazily so the DB dependency is only needed when used
        import lib.resultsloader
        lib.resultsloader.load_results_database(project_name, run_localtime, output_dir, results_database, 
                run_time, rampup, results_ts_interval, user_group_configs)
    
    if post_run_script is not None:
        print 'running post_run_script: %s\n' % post_run_script
        subprocess.call(post_run_script)
        
    print 'done.\n'
    
    if remote_starter is not None:
        # signal completion and hand the results location back to the caller
        remote_starter.test_running = False
        remote_starter.output_dir = output_dir
    
    return
Ejemplo n.º 6
0
def run_test(remote_starter=None):
    """Run a load test for the configured project.

    Spawns one UserGroup process per configured group, shows console
    progress (unless console_logging is enabled), then analyzes results,
    archives the config, and optionally loads results into a database
    and runs a post-run script.

    remote_starter: optional controller object; its test_running /
    output_dir attributes are updated around the run so a remote caller
    can track state.

    NOTE(review): depends on the module-level global project_name --
    confirm it is set before calling.
    """
    if remote_starter is not None:
        remote_starter.test_running = True
        remote_starter.output_dir = None
        
    run_time, rampup, console_logging, results_ts_interval, user_group_configs, results_database, post_run_script = configure(project_name)
    
    # results directory named after the test start time
    run_localtime = time.localtime() 
    output_dir = time.strftime('projects/' + project_name + '/results/results_%Y.%m.%d_%H.%M.%S/', run_localtime) 
        
    # this queue is shared between all processes/threads
    queue = multiprocessing.Queue()
    # background writer drains the queue; daemon so it never blocks exit
    rw = ResultsWriter(queue, output_dir, console_logging)
    rw.daemon = True
    rw.start()
    
    # create all user groups first, then start them together
    user_groups = [] 
    for i, ug_config in enumerate(user_group_configs):
        ug = UserGroup(queue, i, ug_config.name, ug_config.num_threads, ug_config.script_file, run_time, rampup)
        user_groups.append(ug)    
    for user_group in user_groups:
        user_group.start()
        
    start_time = time.time() 
    
    if console_logging:
        # raw per-request logging mode: just wait for the groups to finish
        for user_group in user_groups:
            user_group.join()
    else:
        print '\n  user_groups:  %i' % len(user_groups)
        # NOTE(review): assumes every group has the same thread count as
        # the last one iterated (ug_config leaks from the loop above)
        print '  threads: %i\n' % (ug_config.num_threads * len(user_groups))
        # live console progress: redraw one status line once per second
        p = progressbar.ProgressBar(run_time)
        elapsed = 0
        while elapsed < (run_time + 1):
            p.update_time(elapsed)
            if sys.platform.startswith('win'):
                # windows console: trailing comma + \r redraws in place
                print '%s   transactions: %i  timers: %i  errors: %i\r' % (p, rw.trans_count, rw.timer_count, rw.error_count),
            else:
                print '%s   transactions: %i  timers: %i  errors: %i' % (p, rw.trans_count, rw.timer_count, rw.error_count)
                # ANSI "cursor up" so the next print overwrites this line
                sys.stdout.write(chr(27) + '[A' )
            time.sleep(1)
            elapsed = time.time() - start_time
        
        print p
        
        # poll until every user group process has exited
        while [user_group for user_group in user_groups if user_group.is_alive()] != []:
            if sys.platform.startswith('win'):
                print 'waiting for all requests to finish...\r',
            else:
                print 'waiting for all requests to finish...\r'
                sys.stdout.write(chr(27) + '[A' )
            time.sleep(.5)
            
        if not sys.platform.startswith('win'):
            print

    # all agents are done running at this point
    time.sleep(.2) # make sure the writer queue is flushed
    print '\n\nanalyzing results...\n'
    results.output_results(output_dir, 'results.csv', run_time, rampup, results_ts_interval, user_group_configs)
    print 'created: %sresults.html\n' % output_dir
    
    # copy config file to results directory
    project_config = os.sep.join(['projects', project_name, 'config.cfg'])
    saved_config = os.sep.join([output_dir, 'config.cfg'])
    shutil.copy(project_config, saved_config)
    
    if results_database is not None:
        print 'loading results into database: %s\n' % results_database
        # imported lazily so the DB dependency is only needed when used
        import lib.resultsloader
        lib.resultsloader.load_results_database(project_name, run_localtime, output_dir, results_database, 
                run_time, rampup, results_ts_interval, user_group_configs)
    
    if post_run_script is not None:
        print 'running post_run_script: %s\n' % post_run_script
        subprocess.call(post_run_script)
        
    print 'done.\n'
    
    if remote_starter is not None:
        # signal completion and hand the results location back to the caller
        remote_starter.test_running = False
        remote_starter.output_dir = output_dir
    
    return
Ejemplo n.º 7
0
import lib.results as results
import optparse

class UserGroupConfig(object):
    """Plain value holder for one user group's settings: the number of
    virtual-user threads, the group name, and the script to execute."""

    def __init__(self, num_threads, name, script_file):
        # store the configuration values verbatim on the instance
        self.script_file = script_file
        self.name = name
        self.num_threads = num_threads


# CLI: re-generate a results report for an existing results directory.
# Fix: the usage string previously documented only two arguments, but the
# script reads three positionals; also fail with a usage error instead of
# an IndexError when arguments are missing.
parser = optparse.OptionParser(usage='Usage: %prog <result_dir> <runtime> <num_threads>')
cmd_opts, args = parser.parse_args()
if len(args) < 3:
    parser.error('expected 3 arguments: <result_dir> <runtime> <num_threads>')
output_dir = args[0]
run_time = int(args[1])
num_threads = int(args[2])
# ramp up over the entire run by default
rampup = run_time
user_group_configs = [UserGroupConfig(num_threads, "group-1", "example_br.py")]
results_ts_interval = 5  # time-series bucket size (seconds)

results.output_results(output_dir, 'results.csv', run_time, rampup, results_ts_interval, user_group_configs)

Ejemplo n.º 8
0
import lib.results as results
import optparse


class UserGroupConfig(object):
    """Plain value holder for one user group's settings: the number of
    virtual-user threads, the group name, and the script to execute."""

    def __init__(self, num_threads, name, script_file):
        # store the configuration values verbatim on the instance
        self.script_file = script_file
        self.name = name
        self.num_threads = num_threads


# CLI: re-generate a results report for an existing results directory.
# Fix: the usage string previously documented only two arguments, but the
# script reads three positionals; also fail with a usage error instead of
# an IndexError when arguments are missing.
parser = optparse.OptionParser(usage='Usage: %prog <result_dir> <runtime> <num_threads>')
cmd_opts, args = parser.parse_args()
if len(args) < 3:
    parser.error('expected 3 arguments: <result_dir> <runtime> <num_threads>')
output_dir = args[0]
run_time = int(args[1])
num_threads = int(args[2])
# ramp up over the entire run by default
rampup = run_time
user_group_configs = [UserGroupConfig(num_threads, "group-1", "example_br.py")]
results_ts_interval = 5  # time-series bucket size (seconds)

results.output_results(output_dir, 'results.csv', run_time, rampup,
                       results_ts_interval, user_group_configs)