Example #1
def run_api_concurrent_benchmark(core_api, function_to_call, name_prefix):
    """Call given API endpoint concurrently."""
    measurement_count = 1
    min_thread_count = 1
    max_thread_count = 2
    # a longer series of pauses between calls can be measured as well:
    # pauses = [2.0, 1.5, 1.0, 0.5, 0.0]
    pauses = [0.0]

    summary_min_times = []
    summary_max_times = []
    summary_avg_times = []

    for thread_count in range(min_thread_count, 1 + max_thread_count):
        min_times = []
        max_times = []
        avg_times = []

        for pause in pauses:
            threads = []
            q = queue.Queue()

            for thread_id in range(0, thread_count):
                t = threading.Thread(target=function_to_call,
                                     args=(core_api, measurement_count, pause,
                                           q, thread_id))
                t.start()
                threads.append(t)

            wait_for_all_threads(threads)

            # each thread put a list of measured durations; flatten them into one list
            values = sum([q.get() for t in threads], [])
            title = "core API endpoint, {t} concurrent threads, {s} seconds between calls".format(
                t=thread_count, s=pause)
            name = "{p}_concurrent_{t}_threads_{s}_pause_time".format(
                p=name_prefix, t=thread_count, s=pause)
            graph.generate_wait_times_graph(title, name, values)

            min_times.append(min(values))
            max_times.append(max(values))
            avg_times.append(sum(values) / len(values))

            print("Breathe...")
            time.sleep(BREATHE_PAUSE)

        print("min_times:", min_times)
        print("max_times:", max_times)
        print("avg_times:", avg_times)

        # aggregate over all measured pauses for this thread count
        summary_min_times.append(min(min_times))
        summary_max_times.append(max(max_times))
        summary_avg_times.append(sum(avg_times) / len(avg_times))

        generate_statistic_graph(name_prefix, thread_count, pauses, min_times,
                                 max_times, avg_times)
        print("Breathe (statistic graph)...")
        time.sleep(BREATHE_PAUSE)

    print(summary_min_times)
    print(summary_max_times)
    print(summary_avg_times)
    t = range(min_thread_count, 1 + max_thread_count)
    graph.generate_timing_threads_statistic_graph(
        "Duration for concurrent API calls",
        "{p}_{t}".format(p=name_prefix, t=max_thread_count),
        t, summary_min_times, summary_max_times, summary_avg_times)
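The snippet assumes the standard-library modules queue, threading and time are imported at module level, together with project-local helpers such as wait_for_all_threads, generate_statistic_graph, the graph module and the BREATHE_PAUSE constant. Below is a minimal, self-contained sketch of the fan-out/fan-in pattern used above, with an assumed stand-in worker and a wait_for_all_threads that simply joins the threads; both bodies are illustrative assumptions, not the project's actual implementations.

import queue
import threading
import time


def wait_for_all_threads(threads):
    """Assumed behaviour of the helper used above: join every started thread."""
    for t in threads:
        t.join()


def example_worker(core_api, measurement_count, pause, q, thread_id):
    """Illustrative worker: time fake API calls and put the durations onto the queue."""
    durations = []
    for _ in range(measurement_count):
        started = time.time()
        time.sleep(0.01)  # a real worker would call the core API endpoint here
        durations.append(time.time() - started)
        time.sleep(pause)
    q.put(durations)


def demo(thread_count=4):
    """Fan out workers, fan in their measurements, print basic statistics."""
    q = queue.Queue()
    threads = []
    for thread_id in range(thread_count):
        t = threading.Thread(target=example_worker,
                             args=(None, 1, 0.0, q, thread_id))
        t.start()
        threads.append(t)
    wait_for_all_threads(threads)
    values = sum([q.get() for _ in threads], [])
    print("min:", min(values), "max:", max(values),
          "avg:", sum(values) / len(values))


if __name__ == "__main__":
    demo()

With a real worker, demo would be replaced by a call such as run_api_concurrent_benchmark(core_api, example_worker, "component_search"); the prefix string here is only an illustrative value.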
Example #2
def run_component_analysis_concurrent_calls_benchmark(jobs_api, s3):
    """Call component analysis in more threads and collect results."""
    print("Component analysis concurrent benchmark")
    measurement_count = 1
    min_thread_count = 1
    max_thread_count = 100

    summary_min_times = []
    summary_max_times = []
    summary_avg_times = []

    for thread_count in range(min_thread_count, 1 + max_thread_count):
        min_times = []
        max_times = []
        avg_times = []

        threads = []
        q = queue.Queue()

        for thread_id in range(0, thread_count):
            # pass the worker directly; a forwarding lambda is not needed here
            t = threading.Thread(
                target=benchmarks.component_analysis_thread,
                args=(jobs_api, s3, measurement_count, 10, q, thread_id))
            t.start()
            threads.append(t)

        print("---------------------------------")
        print("Waiting for all threads to finish")
        wait_for_all_threads(threads)
        print("Done")

        values = sum([q.get() for t in threads], [])
        print("values")
        print(len(values))
        print(values)
        print("----")
        title = "Component analysis, {t} concurrent threads".format(
            t=thread_count)
        name = "jobs_flow_scheduling_{t}_threads".format(t=thread_count)
        graph.generate_wait_times_graph(title, name, values)

        min_times.append(min(values))
        max_times.append(max(values))
        avg_times.append(sum(values) / len(values))

        print("min_times:", min_times)
        print("max_times:", max_times)
        print("avg_times:", avg_times)

        summary_min_times.append(min(values))
        summary_max_times.append(max(values))
        summary_avg_times.append(sum(values) / len(values))

        generate_statistic_graph("component_analysis", thread_count, [10],
                                 min_times, max_times, avg_times)
        print("Breathe (statistic graph)...")
        time.sleep(BREATHE_PAUSE)

    print(summary_min_times)
    print(summary_max_times)
    print(summary_avg_times)
    t = range(min_thread_count, 1 + max_thread_count)
    graph.generate_timing_threads_statistic_graph(
        "Duration for concurrent analysis",
        "durations_{i}".format(i=max_thread_count), t, summary_min_times,
        summary_max_times, summary_avg_times)
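On the read side, each worker is expected to put a flat list of measured durations onto the queue; that shape is inferred from how the results are read and flattened with sum(..., []), since benchmarks.component_analysis_thread itself is not shown here. A small sketch of that contract, simulating three workers that each reported two durations (for larger runs, itertools.chain.from_iterable avoids the quadratic cost of repeated list concatenation):

import itertools
import queue

q = queue.Queue()
# simulate three worker threads, each reporting two measured durations
q.put([0.12, 0.15])
q.put([0.11, 0.18])
q.put([0.14, 0.13])

per_thread = [q.get() for _ in range(3)]
values = sum(per_thread, [])                                  # flatten, as in the benchmark above
values_alt = list(itertools.chain.from_iterable(per_thread))  # equivalent, scales better

assert values == values_alt
print("min:", min(values), "max:", max(values), "avg:", sum(values) / len(values))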
Example #3
def run_analysis_concurrent_benchmark(api,
                                      s3,
                                      message,
                                      name_prefix,
                                      function_to_call,
                                      thread_counts=None):
    """Universal function to call any callback function in more threads and collect results."""
    thread_counts = thread_counts or [1, 2, 3, 4]
    print(message + " concurrent benchmark")
    measurement_count = 1

    summary_min_times = []
    summary_max_times = []
    summary_avg_times = []

    for thread_count in thread_counts:
        print("Concurrent threads: {c}".format(c=thread_count))
        min_times = []
        max_times = []
        avg_times = []

        threads = []
        q = queue.Queue()

        for thread_id in range(0, thread_count):
            # pass the callback directly; a forwarding lambda is not needed here
            t = threading.Thread(
                target=function_to_call,
                args=(api, s3, measurement_count, 0, q, thread_id))
            t.start()
            threads.append(t)

        print("---------------------------------")
        print("Waiting for all threads to finish")
        wait_for_all_threads(threads)
        print("Done")

        values = [q.get()[0][0]["delta"] for t in threads]
        print("values")
        print(len(values))
        print(values)
        print("----")
        title = "{n}, {t} concurrent threads".format(n=message, t=thread_count)
        name = "{n}_{t}_threads".format(n=name_prefix, t=thread_count)
        graph.generate_wait_times_graph(title, name, values)

        min_times.append(min(values))
        max_times.append(max(values))
        avg_times.append(sum(values) / len(values))

        print("min_times:", min_times)
        print("max_times:", max_times)
        print("avg_times:", avg_times)

        summary_min_times.append(min(values))
        summary_max_times.append(max(values))
        summary_avg_times.append(sum(values) / len(values))

        generate_statistic_graph(name, thread_count, ["min/avg/max"],
                                 min_times, max_times, avg_times)
        print("Breathe (statistic graph)...")
        time.sleep(BREATHE_PAUSE)

    print(summary_min_times)
    print(summary_max_times)
    print(summary_avg_times)

    graph.generate_timing_threads_statistic_graph(
        "Duration for " + message, name_prefix, thread_counts,
        summary_min_times, summary_max_times, summary_avg_times)

    with open(name_prefix + ".csv", "w") as csvfile:
        csv_writer = csv.writer(csvfile)
        for i in range(0, len(thread_counts)):
            csv_writer.writerow([
                i, thread_counts[i], summary_min_times[i],
                summary_max_times[i], summary_avg_times[i]
            ])
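Here each queue entry is read as q.get()[0][0]["delta"], so the callback is expected to put a nested structure whose first measurement carries the duration under the "delta" key; that shape is an inference from the read expression, not shown in the original code. A self-contained sketch of the size-checked read, with an assumed stand-in for check_number_of_results that only prints a warning:

import queue


def check_number_of_results(queue_size, thread_count):
    """Assumed stand-in: warn when some thread did not report its result."""
    if queue_size != thread_count:
        print("Warning: expected {e} results, got {g}".format(e=thread_count,
                                                              g=queue_size))


q = queue.Queue()
thread_count = 3
# simulate two successful workers and one that failed to report
q.put([[{"delta": 1.42}]])
q.put([[{"delta": 1.57}]])

queue_size = q.qsize()
check_number_of_results(queue_size, thread_count)

# read only what was really stored, so the read never blocks on q.get()
values = [q.get()[0][0]["delta"] for i in range(queue_size)]
print(values)

Reading qsize() items instead of one item per started thread is what keeps the benchmark from hanging when a worker dies before putting its result.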