예제 #1
0
def rerun_results(project_name, cmd_opts, results_dir):
    output_dir = '%s/%s/results/%s/' % (cmd_opts.projects_dir, project_name, results_dir)
    saved_config = '%s/config.cfg' % output_dir
    run_time, rampup, results_ts_interval, console_logging, progress_bar, results_database, post_run_script, xml_report, user_group_configs = configure(project_name, cmd_opts, config_file=saved_config)
    print '\n\nanalyzing results...\n'
    results.output_results(output_dir, 'results.csv', run_time, rampup, results_ts_interval, user_group_configs, xml_report)
    print 'created: %sresults.html\n' % output_dir
    if xml_report:
        print 'created: %sresults.jtl' % output_dir
        print 'created: last_results.jtl\n'
예제 #2
0
def rerun_results(project_name, cmd_opts, results_dir):
    output_dir = '%s/%s/results/%s/' % (cmd_opts.projects_dir, project_name, results_dir)
    saved_config = '%s/config.cfg' % output_dir
    run_time, rampup, results_ts_interval, console_logging, progress_bar, results_database, post_run_script, xml_report, user_group_configs, global_config = configure(project_name, cmd_opts, config_file=saved_config)
    print '\n\nanalyzing results...\n'
    results.output_results(output_dir, 'results.csv', run_time, rampup, results_ts_interval, user_group_configs, xml_report)
    print 'created: %sresults.html\n' % output_dir
    if xml_report:
        print 'created: %sresults.jtl' % output_dir
        print 'created: last_results.jtl\n'
예제 #3
0
def rerun_results(project_name, cmd_opts, results_dir):
    output_dir = "%s/%s/results/%s/" % (cmd_opts.projects_dir, project_name, results_dir)
    saved_config = "%s/config.cfg" % output_dir
    # run_time, rampup, results_ts_interval, console_logging, progress_bar, results_database, pre_run_script, post_run_script, xml_report, user_group_configs = configure(project_name, cmd_opts, config_file=saved_config)
    run_time, rampup, results_ts_interval, console_logging, progress_bar, results_database, pre_run_script, post_run_script, xml_report, user_group_configs, generator_scripts = configure(
        project_name, cmd_opts, config_file=saved_config
    )
    print "\n\nanalyzing results...\n"
    results.output_results(
        output_dir, "results.csv", run_time, rampup, results_ts_interval, user_group_configs, xml_report
    )
    print "created: %sresults.html\n" % output_dir
    if xml_report:
        print "created: %sresults.jtl" % output_dir
        print "created: last_results.jtl\n"
예제 #4
0
def rerun_results(project_name, cmd_opts, results_dir):
    output_dir = os.path.join(cmd_opts.projects_dir, project_name, 'results',
                              results_dir)
    saved_config = os.path.join(output_dir, 'config.cfg')
    run_time, transaction_limit, rampup, results_ts_interval, console_logging,\
        progress_bar, results_database, post_run_script, xml_report,\
        user_group_configs = configure(project_name, cmd_opts,
                                       config_file=saved_config)
    print '\n\nanalyzing results...\n'
    results.output_results(output_dir, 'results.csv', run_time,
                           transaction_limit, rampup, results_ts_interval,
                           user_group_configs, xml_report)
    print 'created: %s\n' % os.path.join(output_dir, 'results.html')
    if xml_report:
        print 'created: %s' % os.path.join(output_dir, 'results.jtl')
        print 'created: last_results.jtl\n'
예제 #5
0
def rerun_results(project_name, cmd_opts, results_dir):
    output_dir = os.path.join(cmd_opts.projects_dir, project_name, 'results',
                              results_dir)
    saved_config = os.path.join(output_dir, 'config.cfg')
    run_time, transaction_limit, rampup, results_ts_interval, console_logging,\
        progress_bar, results_database, post_run_script, xml_report,\
        user_group_configs = configure(project_name, cmd_opts,
                                       config_file=saved_config)
    print '\n\nanalyzing results...\n'
    results.output_results(output_dir, 'results.csv', run_time,
                           transaction_limit, rampup, results_ts_interval,
                           user_group_configs, xml_report)
    print 'created: %s\n' % os.path.join(output_dir, 'results.html')
    if xml_report:
        print 'created: %s' % os.path.join(output_dir, 'results.jtl')
        print 'created: last_results.jtl\n'
예제 #6
0
def run_test(project_name, cmd_opts, remote_starter=None):
    if remote_starter is not None:
        remote_starter.test_running = True
        remote_starter.output_dir = None

    run_time, rampup, results_ts_interval, console_logging, progress_bar, results_database, post_run_script, xml_report, user_group_configs = configure(project_name, cmd_opts)

    run_localtime = time.localtime()
    if cmd_opts.output_dir:
        output_dir = cmd_opts.output_dir
    else:
        output_dir = '%s/%s/results/results_%s' % (cmd_opts.projects_dir, project_name, time.strftime('%Y.%m.%d_%H.%M.%S/', run_localtime))

    # this queue is shared between all processes/threads
    queue = multiprocessing.Queue()
    rw = resultswriter.ResultsWriter(queue, output_dir, console_logging)
    rw.daemon = True
    rw.start()

    script_prefix = os.path.join(cmd_opts.projects_dir, project_name, "test_scripts")
    script_prefix = os.path.normpath(script_prefix)

    user_groups = []
    for i, ug_config in enumerate(user_group_configs):
        script_file = os.path.join(script_prefix, ug_config.script_file)
        ug = core.UserGroup(queue, i, ug_config.name, ug_config.num_threads,
                            script_file, run_time, rampup)
        user_groups.append(ug)
    for user_group in user_groups:
        user_group.start()

    start_time = time.time()

    if console_logging:
        for user_group in user_groups:
            user_group.join()
    else:
        print '\n  user_groups:  %i' % len(user_groups)
        print '  threads: %i\n' % (ug_config.num_threads * len(user_groups))

        if progress_bar:
            p = progressbar.ProgressBar(run_time)
            elapsed = 0
            while elapsed < (run_time + 1):
                p.update_time(elapsed)
                if sys.platform.startswith('win'):
                    print '%s   transactions: %i  timers: %i  errors: %i\r' % (p, rw.trans_count, rw.timer_count, rw.error_count),
                else:
                    print '%s   transactions: %i  timers: %i  errors: %i' % (p, rw.trans_count, rw.timer_count, rw.error_count)
                    sys.stdout.write(chr(27) + '[A' )
                time.sleep(1)
                elapsed = time.time() - start_time

            print p

        while [user_group for user_group in user_groups if user_group.is_alive()] != []:
            if progress_bar:
                if sys.platform.startswith('win'):
                    print 'waiting for all requests to finish...\r',
                else:
                    print 'waiting for all requests to finish...\r'
                    sys.stdout.write(chr(27) + '[A' )
            time.sleep(.5)

        if not sys.platform.startswith('win'):
            print

    # all agents are done running at this point
    time.sleep(.2) # make sure the writer queue is flushed
    print '\n\nanalyzing results...\n'
    results.output_results(output_dir, 'results.csv', run_time, rampup, results_ts_interval, user_group_configs, xml_report)
    print 'created: %sresults.html\n' % output_dir
    if xml_report:
        print 'created: %sresults.jtl' % output_dir
        print 'created: last_results.jtl\n'

    # copy config file to results directory
    project_config = os.sep.join([cmd_opts.projects_dir, project_name, 'config.cfg'])
    saved_config = os.sep.join([output_dir, 'config.cfg'])
    shutil.copy(project_config, saved_config)

    if results_database is not None:
        print 'loading results into database: %s\n' % results_database
        import multimechanize.resultsloader
        multimechanize.resultsloader.load_results_database(project_name, run_localtime, output_dir, results_database,
                run_time, rampup, results_ts_interval, user_group_configs)

    if post_run_script is not None:
        print 'running post_run_script: %s\n' % post_run_script
        subprocess.call(post_run_script)

    print 'done.\n'

    if remote_starter is not None:
        remote_starter.test_running = False
        remote_starter.output_dir = output_dir

    return
예제 #7
0
 def generate_html(self):
     """Run results analysis and announce the generated HTML report path
     in the GUI text box."""
     start_msg = '\n\nanalyzing results...\n'
     self.text_box.insert(Tkinter.END, start_msg)
     results.output_results(self.results_directory, 'results.csv',
                            self.run_time, self.rampup,
                            self.results_ts_interval)
     done_msg = 'created: %sresults.html\n' % self.results_directory
     self.text_box.insert(Tkinter.END, done_msg)
예제 #8
0
def run_test(project_name, cmd_opts, remote_starter=None):
    if remote_starter is not None:
        remote_starter.test_running = True
        remote_starter.output_dir = None

    run_time, transaction_limit, rampup, results_ts_interval, \
        console_logging, progress_bar, results_database, post_run_script, \
        xml_report, user_group_configs = configure(project_name, cmd_opts)

    run_localtime = time.localtime()
    output_dir = os.path.join(
        cmd_opts.projects_dir, project_name, 'results',
        time.strftime('%Y.%m.%d_%H.%M.%S/', run_localtime))

    # this queue is shared between all processes/threads
    queue = multiprocessing.Queue()
    rw = resultswriter.ResultsWriter(queue, output_dir, console_logging)
    rw.daemon = True
    rw.start()

    script_prefix = os.path.join(cmd_opts.projects_dir, project_name,
                                 'test_scripts')
    script_prefix = os.path.normpath(script_prefix)

    user_groups = []
    for i, ug_config in enumerate(user_group_configs):
        script_file = os.path.join(script_prefix, ug_config.script_file)
        ug = core.UserGroup(queue, i, ug_config.name, ug_config.num_threads,
                            script_file, run_time, transaction_limit, rampup)
        user_groups.append(ug)
    for user_group in user_groups:
        user_group.start()

    start_time = time.time()

    if console_logging:
        for user_group in user_groups:
            user_group.join()
    else:
        print '\n  user_groups:  %i' % len(user_groups)
        print '  threads: %i\n' % (ug_config.num_threads * len(user_groups))

        if progress_bar:
            p = progressbar.ProgressBar(run_time)
            elapsed = 0
            while elapsed < (run_time + 1):
                p.update_time(elapsed)
                if sys.platform.startswith('win'):
                    print '%s   transactions: %i  timers: %i  errors: %i\r' % \
                          (p, rw.trans_count, rw.timer_count, rw.error_count),
                else:
                    print '%s   transactions: %i  timers: %i  errors: %i' % \
                          (p, rw.trans_count, rw.timer_count, rw.error_count)
                    sys.stdout.write(chr(27) + '[A')
                time.sleep(1)
                elapsed = time.time() - start_time

            print p

        while [ug for ug in user_groups if ug.is_alive()]:
            if progress_bar:
                if sys.platform.startswith('win'):
                    print 'waiting for all requests to finish...\r',
                else:
                    print 'waiting for all requests to finish...\r'
                    sys.stdout.write(chr(27) + '[A')
            time.sleep(.5)

        if not sys.platform.startswith('win'):
            print

    # all agents are done running at this point
    time.sleep(.2)  # make sure the writer queue is flushed
    print '\n\nanalyzing results...\n'
    results.output_results(output_dir, 'results.csv', run_time,
                           transaction_limit, rampup, results_ts_interval,
                           user_group_configs, xml_report)
    print 'created: %s\n' % os.path.join(output_dir, 'results.html')
    if xml_report:
        print 'created: %s' % os.path.join(output_dir, 'results.jtl')
        print 'created: last_results.jtl\n'

    # copy config file to results directory
    project_config = os.sep.join(
        [cmd_opts.projects_dir, project_name, cmd_opts.config_file])
    saved_config = os.sep.join([output_dir, cmd_opts.config_file])
    shutil.copy(project_config, saved_config)

    if results_database is not None:
        print 'loading results into database: %s\n' % results_database
        import multimechanize.resultsloader
        multimechanize.resultsloader.load_results_database(
            project_name, run_localtime, output_dir, results_database,
            run_time, transaction_limit, rampup, results_ts_interval,
            user_group_configs)

    if post_run_script is not None:
        print 'running post_run_script: %s\n' % post_run_script
        subprocess.call(post_run_script)

    print 'done.\n'

    if remote_starter is not None:
        remote_starter.test_running = False
        remote_starter.output_dir = output_dir

    return
예제 #9
0
def run_test(project_name, cmd_opts, remote_starter=None):
    if remote_starter is not None:
        remote_starter.test_running = True
        remote_starter.output_dir = None

    run_time, rampup, results_ts_interval, console_logging, progress_bar, results_database, pre_run_script, post_run_script, xml_report, user_group_configs, generator_scripts = configure(
        project_name, cmd_opts
    )

    # Run setup script
    if pre_run_script is not None:
        cmd = "{0}/{1}/test_scripts/{2}".format(cmd_opts.projects_dir, project_name, pre_run_script)
        print "Running global pre_run_script: %s\n" % cmd
        subprocess.call(cmd)

    run_localtime = time.localtime()
    output_dir = "%s/%s/results/results_%s" % (
        cmd_opts.projects_dir,
        project_name,
        time.strftime("%Y.%m.%d_%H.%M.%S/", run_localtime),
    )

    generators = setup_generators(cmd_opts.projects_dir, project_name, generator_scripts)
    # this queue is shared between all processes/threads
    queue = multiprocessing.Queue()
    rw = resultswriter.ResultsWriter(queue, output_dir, console_logging)
    rw.daemon = True
    rw.start()
    script_prefix = os.path.join(cmd_opts.projects_dir, project_name, "test_scripts")
    script_prefix = os.path.normpath(script_prefix)

    user_groups = []
    for i, ug_config in enumerate(user_group_configs):
        script_file = os.path.join(script_prefix, ug_config.script_file)
        gen_cli = None
        if ug_config.generator:
            gen_cli = generators[ug_config.generator].get_client()
        ug = core.UserGroup(
            queue,
            i,
            ug_config.name,
            ug_config.num_threads,
            script_file,
            run_time,
            rampup,
            gen_cli,
            ug_config.user_group_global_config,
        )
        user_groups.append(ug)
    for user_group in user_groups:
        user_group.start()
        atexit.register(user_group.terminate)

    start_time = time.time()

    if console_logging:
        for user_group in user_groups:
            user_group.join()
    else:
        print "\n  user_groups:  %i" % len(user_groups)
        print "  threads: %i\n" % (ug_config.num_threads * len(user_groups))

        if progress_bar:
            p = progressbar.ProgressBar(run_time)
            elapsed = 0
            while elapsed < (run_time + 1):
                p.update_time(elapsed)
                if sys.platform.startswith("win"):
                    print "%s   transactions: %i  timers: %i  errors: %i\r" % (
                        p,
                        rw.trans_count,
                        rw.timer_count,
                        rw.error_count,
                    ),
                else:
                    print "%s   transactions: %i  timers: %i  errors: %i" % (
                        p,
                        rw.trans_count,
                        rw.timer_count,
                        rw.error_count,
                    )
                    sys.stdout.write(chr(27) + "[A")
                time.sleep(1)
                elapsed = time.time() - start_time

            print p

        while [user_group for user_group in user_groups if user_group.is_alive()] != []:
            if progress_bar:
                if sys.platform.startswith("win"):
                    print "waiting for all requests to finish...\r",
                else:
                    print "waiting for all requests to finish...\r"
                    sys.stdout.write(chr(27) + "[A")
            time.sleep(0.5)

        if not sys.platform.startswith("win"):
            print

    # all agents are done running at this point
    time.sleep(0.2)  # make sure the writer queue is flushed
    print "\n\nanalyzing results...\n"
    results.output_results(
        output_dir, "results.csv", run_time, rampup, results_ts_interval, user_group_configs, xml_report
    )
    print "created: %sresults.html\n" % output_dir
    if xml_report:
        print "created: %sresults.jtl" % output_dir
        print "created: last_results.jtl\n"

    # copy config file to results directory
    project_config = os.sep.join([cmd_opts.projects_dir, project_name, "config.cfg"])
    saved_config = os.sep.join([output_dir, "config.cfg"])
    shutil.copy(project_config, saved_config)

    if results_database is not None:
        print "loading results into database: %s\n" % results_database
        import multimechanize.resultsloader

        multimechanize.resultsloader.load_results_database(
            project_name,
            run_localtime,
            output_dir,
            results_database,
            run_time,
            rampup,
            results_ts_interval,
            user_group_configs,
        )

    if post_run_script is not None:
        cmd = "{0}/{1}/test_scripts/{2}".format(cmd_opts.projects_dir, project_name, post_run_script)
        print "Running global post_run_script: %s\n" % cmd
        subprocess.call(cmd)

    print "done.\n"

    if remote_starter is not None:
        remote_starter.test_running = False
        remote_starter.output_dir = output_dir

    return