Example #1
0
def generate_statistic_graph(name_prefix, thread_count, x_axis_labels, min_times, max_times,
                             avg_times):
    """Generate statistic graph with min, average, and max times."""
    title = "core API endpoint: min, max, and avg times for {t} concurrent threads".format(
        t=thread_count)
    name = "{p}_concurrent_{t}_threads_min_max_avg_times".format(p=name_prefix, t=thread_count)
    graph.generate_timing_statistic_graph(title, name, x_axis_labels,
                                          min_times, max_times, avg_times, 640, 480)
def generate_statistic_graph(name_prefix, thread_count, x_axis_labels, min_times, max_times,
                             avg_times):
    """Generate statistic graph with min, average, and max times."""
    title = "core API endpoint: min, max, and avg times for {t} concurrent threads".format(
        t=thread_count)
    name = "{p}_concurrent_{t}_threads_min_max_avg_times".format(p=name_prefix, t=thread_count)
    graph.generate_timing_statistic_graph(title, name, x_axis_labels,
                                          min_times, max_times, avg_times, 640, 480)
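
A minimal usage sketch for the helper above; the prefix, thread count, labels, and timing values are hypothetical, and graph.generate_timing_statistic_graph is assumed to come from the surrounding benchmarking module, as in the snippet itself.

# Hypothetical call: per-run timing statistics gathered elsewhere by a benchmark.
generate_statistic_graph(
    name_prefix="component_analysis",                  # assumed prefix, not from the original
    thread_count=10,
    x_axis_labels=["run 1", "run 2", "run 3"],
    min_times=[0.12, 0.10, 0.11],
    max_times=[0.95, 1.02, 0.88],
    avg_times=[0.40, 0.38, 0.41])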
Example #3
0
def run_sequenced_benchmark(
        api,
        s3,
        title_prefix,
        name_prefix,
        function,
        pauses=None,
        measurement_count=SEQUENCED_BENCHMARKS_DEFAULT_COUNT,
        compute_stack_analysis_jobs_durations=False):
    """Start benchmarks by calling selected function sequentially."""
    pauses = pauses or [10]
    print("pauses: {p}".format(p=pauses))
    print("measurement_count: {c}".format(c=measurement_count))

    stack_analysis_jobs_durations = None

    # for the stack analysis we are able to compute statistics for each job
    if compute_stack_analysis_jobs_durations:
        stack_analysis_jobs_durations = {}
        stack_analysis_jobs_durations_min_times = {}
        stack_analysis_jobs_durations_max_times = {}
        stack_analysis_jobs_durations_avg_times = {}
        for job_name in STACK_ANALYSIS_JOB_NAMES:
            stack_analysis_jobs_durations[job_name] = []
            stack_analysis_jobs_durations_min_times[job_name] = []
            stack_analysis_jobs_durations_max_times[job_name] = []
            stack_analysis_jobs_durations_avg_times[job_name] = []

    measurements = []
    min_times = []
    max_times = []
    avg_times = []

    for pause in pauses:
        if len(pauses) > 1:
            title = "{t}, {s} seconds between calls".format(t=title_prefix,
                                                            s=pause)
            name = "{n}_{s}_pause_time".format(n=name_prefix, s=pause)
        else:
            title = "{t}".format(t=title_prefix)
            name = "{n}".format(n=name_prefix)
        print("  " + title)

        values, debug = function(api, s3, measurement_count, pause)
        deltas = [value["delta"] for value in values]

        graph.generate_wait_times_graph(title, name, deltas)
        print("Breathe (statistic graph)...")
        time.sleep(BREATHE_PAUSE)

        min_times.append(min(deltas))
        max_times.append(max(deltas))
        avg_times.append(sum(deltas) / len(deltas))
        measurements.extend(deltas)

        if compute_stack_analysis_jobs_durations:
            for job_name in STACK_ANALYSIS_JOB_NAMES:
                durations = job_durations(job_name, debug)
                # all durations for specific jobs need to be stored here
                stack_analysis_jobs_durations[job_name].extend(durations)
                # compute statistics
                cnt = len(durations)
                stack_analysis_jobs_durations_min_times[job_name].append(
                    min(durations))
                stack_analysis_jobs_durations_max_times[job_name].append(
                    max(durations))
                stack_analysis_jobs_durations_avg_times[job_name].append(
                    sum(durations) / cnt)

    print(min_times)
    print(max_times)
    print(avg_times)

    if compute_stack_analysis_jobs_durations:
        print_job_durations(stack_analysis_jobs_durations,
                            stack_analysis_jobs_durations_min_times,
                            stack_analysis_jobs_durations_max_times,
                            stack_analysis_jobs_durations_avg_times)

    title = "{t}: min. max. and avg times".format(t=title_prefix)
    min_max_avg_name = "{n}_min_max_avg_times".format(n=name_prefix)
    graph.generate_timing_statistic_graph(title, min_max_avg_name, pauses,
                                          min_times, max_times, avg_times)

    export_sequenced_benchmark_into_csv(name, measurements,
                                        compute_stack_analysis_jobs_durations,
                                        stack_analysis_jobs_durations)
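
The print_job_durations helper called above is not defined in this example. A plausible sketch, modeled on the inlined printing loop in the next variant, might look like this (the exact output format is an assumption):

def print_job_durations(jobs_durations, min_times, max_times, avg_times):
    """Print collected durations and per-job statistics for all stack analysis jobs."""
    # sketch only: mirrors the inlined printing in the following variant
    print("stack analysis jobs")
    for job_name in STACK_ANALYSIS_JOB_NAMES:
        print(job_name)
        print(jobs_durations[job_name])
        print(min_times[job_name])
        print(max_times[job_name])
        print(avg_times[job_name])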
def run_sequenced_benchmark(api,
                            s3,
                            title_prefix,
                            name_prefix,
                            function,
                            pauses=None,
                            measurement_count=10,
                            compute_stack_analysis_jobs_durations=False):
    """Start benchmarks by calling selected function sequentially."""
    pauses = pauses or [10]
    print("pauses:")
    print(pauses)

    # for the stack analysis we are able to compute statistics for each job
    if compute_stack_analysis_jobs_durations:
        stack_analysis_jobs_durations_min_times = {}
        stack_analysis_jobs_durations_max_times = {}
        stack_analysis_jobs_durations_avg_times = {}
        for job_name in STACK_ANALYSIS_JOB_NAMES:
            stack_analysis_jobs_durations_min_times[job_name] = []
            stack_analysis_jobs_durations_max_times[job_name] = []
            stack_analysis_jobs_durations_avg_times[job_name] = []

    measurements = []
    min_times = []
    max_times = []
    avg_times = []

    for pause in pauses:
        if len(pauses) > 1:
            title = "{t}, {s} seconds between calls".format(t=title_prefix,
                                                            s=pause)
            name = "{n}_{s}_pause_time".format(n=name_prefix, s=pause)
        else:
            title = "{t}".format(t=title_prefix)
            name = "{n}".format(n=name_prefix)
        print("  " + title)
        values, debug = function(api, s3, measurement_count, pause)
        graph.generate_wait_times_graph(title, name, values)
        print("Breathe (statistic graph)...")
        time.sleep(20)

        min_times.append(min(values))
        max_times.append(max(values))
        avg_times.append(sum(values) / len(values))
        measurements.extend(values)

        if compute_stack_analysis_jobs_durations:
            for job_name in STACK_ANALYSIS_JOB_NAMES:
                durations = job_durations(job_name, debug)
                cnt = len(durations)
                stack_analysis_jobs_durations_min_times[job_name].append(
                    min(durations))
                stack_analysis_jobs_durations_max_times[job_name].append(
                    max(durations))
                stack_analysis_jobs_durations_avg_times[job_name].append(
                    sum(durations) / cnt)

    print(min_times)
    print(max_times)
    print(avg_times)

    if compute_stack_analysis_jobs_durations:
        print("stack analysis jobs")
        for job_name in STACK_ANALYSIS_JOB_NAMES:
            print(job_name)
            print(stack_analysis_jobs_durations_min_times[job_name])
            print(stack_analysis_jobs_durations_max_times[job_name])
            print(stack_analysis_jobs_durations_avg_times[job_name])

    title = "{t}: min. max. and avg times".format(t=title_prefix)
    min_max_avg_name = "{n}_min_max_avg_times".format(n=name_prefix)
    graph.generate_timing_statistic_graph(title, min_max_avg_name, pauses,
                                          min_times, max_times, avg_times)

    with open(name + ".csv", "w") as csvfile:
        csv_writer = csv.writer(csvfile)
        for m in measurements:
            csv_writer.writerow([m])
def run_sequenced_benchmark(api, s3, title_prefix, name_prefix, function,
                            pauses=None, measurement_count=SEQUENCED_BENCHMARKS_DEFAULT_COUNT,
                            compute_stack_analysis_jobs_durations=False):
    """Start benchmarks by calling selected function sequentially."""
    pauses = pauses or [10]
    print("pauses: {p}".format(p=pauses))
    print("measurement_count: {c}".format(c=measurement_count))

    stack_analysis_jobs_durations = None

    # for the stack analysis we are able to compute statistics for each job
    if compute_stack_analysis_jobs_durations:
        stack_analysis_jobs_durations = {}
        stack_analysis_jobs_durations_min_times = {}
        stack_analysis_jobs_durations_max_times = {}
        stack_analysis_jobs_durations_avg_times = {}
        for job_name in STACK_ANALYSIS_JOB_NAMES:
            stack_analysis_jobs_durations[job_name] = []
            stack_analysis_jobs_durations_min_times[job_name] = []
            stack_analysis_jobs_durations_max_times[job_name] = []
            stack_analysis_jobs_durations_avg_times[job_name] = []

    measurements = []
    min_times = []
    max_times = []
    avg_times = []

    for pause in pauses:
        if len(pauses) > 1:
            title = "{t}, {s} seconds between calls".format(t=title_prefix, s=pause)
            name = "{n}_{s}_pause_time".format(n=name_prefix, s=pause)
        else:
            title = "{t}".format(t=title_prefix)
            name = "{n}".format(n=name_prefix)
        print("  " + title)

        values, debug = function(api, s3, measurement_count, pause)
        deltas = [value["delta"] for value in values]

        graph.generate_wait_times_graph(title, name, deltas)
        print("Breathe (statistic graph)...")
        time.sleep(BREATHE_PAUSE)

        min_times.append(min(deltas))
        max_times.append(max(deltas))
        avg_times.append(sum(deltas) / len(deltas))
        measurements.extend(deltas)

        if compute_stack_analysis_jobs_durations:
            for job_name in STACK_ANALYSIS_JOB_NAMES:
                durations = job_durations(job_name, debug)
                # all durations for specific jobs need to be stored here
                stack_analysis_jobs_durations[job_name].extend(durations)
                # compute statistics
                cnt = len(durations)
                stack_analysis_jobs_durations_min_times[job_name].append(min(durations))
                stack_analysis_jobs_durations_max_times[job_name].append(max(durations))
                stack_analysis_jobs_durations_avg_times[job_name].append(sum(durations) / cnt)

    print(min_times)
    print(max_times)
    print(avg_times)

    if compute_stack_analysis_jobs_durations:
        print_job_durations(stack_analysis_jobs_durations, stack_analysis_jobs_durations_min_times,
                            stack_analysis_jobs_durations_max_times,
                            stack_analysis_jobs_durations_avg_times)

    title = "{t}: min. max. and avg times".format(t=title_prefix)
    min_max_avg_name = "{n}_min_max_avg_times".format(n=name_prefix)
    graph.generate_timing_statistic_graph(title, min_max_avg_name,
                                          pauses, min_times, max_times, avg_times)

    export_sequenced_benchmark_into_csv(name, measurements,
                                        compute_stack_analysis_jobs_durations,
                                        stack_analysis_jobs_durations)
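
The export_sequenced_benchmark_into_csv helper is referenced but not defined here. A minimal sketch based on the inlined CSV export in the second variant could be the following; how the per-job stack analysis durations are written is an assumption.

import csv

def export_sequenced_benchmark_into_csv(name, measurements,
                                         compute_stack_analysis_jobs_durations,
                                         stack_analysis_jobs_durations):
    """Write the measured durations into a CSV file named after the benchmark."""
    # sketch only: one measurement per row, as in the inlined variant above;
    # the per-job durations layout below is assumed, not taken from the original
    with open(name + ".csv", "w") as csvfile:
        csv_writer = csv.writer(csvfile)
        for m in measurements:
            csv_writer.writerow([m])
        if compute_stack_analysis_jobs_durations and stack_analysis_jobs_durations:
            for job_name, durations in stack_analysis_jobs_durations.items():
                csv_writer.writerow([job_name] + durations)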