Example no. 1
import numpy

# utils and perf_cpu_tables are project-local modules of this benchmark suite.
import utils
import perf_cpu_tables


def print_summary(mode, array, array_name, unit, df, item_value=None):
    if (utils.print_level & utils.Levels.SUMMARY) and (len(array) > 0):
        result = []
        before = ""
        after = ""
        RED = "\033[1;31m"
        ORANGE = "\033[1;33m"
        WHITE = "\033[1;m"
        GREEN = "\033[1;32m"

        for host in array:
            result.append(df[host].sum())
        if "unstable" in array_name:
            before = RED
            after = WHITE
        if "curious" in array_name:
            before = ORANGE
            after = WHITE

        mean = numpy.mean(result)
        perf_status = ""
        if array_name == "consistent":
            if item_value is not None:
                if mode == "loops_per_sec" or mode == "bogomips":
                    min_cpu_perf = perf_cpu_tables.get_cpu_min_perf(mode, item_value)
                    if min_cpu_perf == 0:
                        perf_status = ": " + ORANGE + "NO PERF ENTRY IN DB" + WHITE + " for " + item_value
                    elif (mean >= min_cpu_perf):
                        perf_status = ": " + GREEN + "PERF OK" + WHITE
                    else:
                        perf_status = ": " + RED + "PERF FAIL" + WHITE + " as min perf should have been : " + str(min_cpu_perf)
        utils.do_print(mode, utils.Levels.SUMMARY, "%3d %s%-10s%s hosts with %8.2f %-4s as average value and %8.2f standard deviation %s", len(array), before, array_name, after, mean, unit, numpy.std(result), perf_status)
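
A minimal usage sketch for print_summary above, assuming the project's utils module is importable and SUMMARY printing is enabled; the host names and MB/s figures are invented for illustration:

from pandas import DataFrame, Series

df = DataFrame({
    "host-a": Series([1200.0, 1195.5], index=["logical_0", "logical_1"]),
    "host-b": Series([1210.2, 1201.7], index=["logical_0", "logical_1"]),
})
# Summarize both hosts as a "consistent" group for the 1M block size;
# each host contributes df[host].sum() to the averaged result.
print_summary("1M", ["host-a", "host-b"], "consistent", "MB/s", df)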
Example no. 2
from pandas import DataFrame, Series

# search_item, print_perf, prepare_detail, print_detail, print_summary and
# utils are project-local helpers of this benchmark suite.
import utils


def memory_perf(systems, group_number, detail_options):
    print("Group %d : Checking Memory perf" % group_number)
    modes = ['1K', '4K', '1M', '16M', '128M', '1G', '2G']
    sets = search_item(systems, "cpu", "(.*)", [], modes)
    for mode in modes:
        real_mode = "Memory benchmark %s" % mode
        results = {}
        threaded_perf = dict()
        forked_perf = dict()
        for system in sets:
            memory = []
            series = []
            threaded_perf[system] = 0
            forked_perf[system] = 0
            for perf in sets[system]:
                if (mode in perf[2]):
                    # We shall split individual cpu benchmarking from the global one
                    if ("logical_" in perf[1]) and (("bandwidth_%s" % mode) in perf[2]):
                        if (not perf[1] in memory):
                            memory.append(perf[1])
                        series.append(float(perf[3]))
                    elif ("threaded_bandwidth_%s" % mode) in perf[2]:
                        threaded_perf[system] = float(perf[3])
                    elif ("forked_bandwidth_%s" % mode) in perf[2]:
                        forked_perf[system] = float(perf[3])
            results[system] = Series(series, index=memory)

        consistent = []
        curious = []
        unstable = []
        details = []
        matched_category = ''

        df = DataFrame(results)
        for memory in df.transpose().columns:
            print_perf(1, 7, df.transpose()[memory], df, real_mode, memory,
                       consistent, curious, unstable)
            matched_category = []
            prepare_detail(detail_options, group_number, mode, memory, details, matched_category)

        print_detail(detail_options, details, df, matched_category)
        print_summary(mode, consistent, "consistent", "MB/s", df)
        print_summary(mode, curious, "curious", "MB/s", df)
        print_summary(mode, unstable, "unstable", "MB/s", df)

        for bench_type in ["threaded", "forked"]:
            efficiency = {}
            have_forked_or_threaded = False
            if ("threaded" in bench_type):
                mode_text = "Thread effi."
            else:
                mode_text = "Forked Effi."
            for system in sets:
                host_efficiency_full_load = []
                host_perf = df[system].sum()
                if (host_perf > 0 and threaded_perf[system] > 0 and
                        forked_perf[system] > 0):
                    have_forked_or_threaded = True
                    if ("threaded" in bench_type):
                        host_efficiency_full_load.append(threaded_perf[system] / host_perf * 100)
                    else:
                        host_efficiency_full_load.append(forked_perf[system] / host_perf * 100)

                    efficiency[system] = Series(host_efficiency_full_load, index=[mode_text])

            details = []
            memory_eff = DataFrame(efficiency)
            if have_forked_or_threaded is True:
                consistent = []
                curious = []
                unstable = []

                for memory in memory_eff.transpose().columns:
                    print_perf(2, 10, memory_eff.transpose()[memory],
                               memory_eff, real_mode, memory, consistent,
                               curious, unstable)
                    matched_category = []
                    prepare_detail(detail_options, group_number, mode, memory, details, matched_category)

                # Let's pad when it's a thread or forked efficiency,
                # in addition to the block size
                if matched_category:
                    matched_category[0] += " " + mode_text

                print_detail(detail_options, details, memory_eff, matched_category)
                print_summary(mode + " " + mode_text, consistent, "consistent", "%", memory_eff)
                print_summary(mode + " " + mode_text, curious, "curious", "%", memory_eff)
                print_summary(mode + " " + mode_text, unstable, "unstable", "%", memory_eff)
            else:
                utils.do_print(real_mode, utils.Levels.WARNING,
                               "%-12s : Benchmark not run on this group",
                               mode_text)
        print()
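
search_item's return shape is not shown in this section; from the way perf[1], perf[2] and perf[3] are consumed above, a plausible sketch of the sets mapping for one host is as follows (the tuple layout and all values are assumptions):

sets = {
    "host-a": [
        ("cpu", "logical_0", "bandwidth_1K", "1250.0"),
        ("cpu", "logical_1", "bandwidth_1K", "1248.3"),
        ("cpu", "cpu", "threaded_bandwidth_1K", "2400.1"),
        ("cpu", "cpu", "forked_bandwidth_1K", "2451.9"),
    ],
}

The per-CPU "logical_*" bandwidth entries feed each host's Series, while the threaded/forked entries feed the efficiency computation that follows.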
Example no. 3
# utils and compute_variance_percentage are project-local helpers of this
# benchmark suite.
import utils


def print_perf(tolerance_min, tolerance_max, item, df, mode, title,
               consistent=None, curious=None, unstable=None):
    # tolerance_min is the variance percentage below which hosts are not
    # individually flagged; tolerance_max is the maximum variance percentage,
    # relative to the group mean, before the whole group is marked unstable.

    variance_group = item.std()  # standard deviation (pandas uses ddof=1)
    mean_group = item.mean()
    min_group = mean_group - 2*variance_group
    max_group = mean_group + 2*variance_group

    utils.do_print(mode, utils.Levels.INFO,
                   "%-12s : Group performance : min=%8.2f, mean=%8.2f, "
                   "max=%8.2f, stddev=%8.2f",
                   title, item.min(), mean_group, item.max(), variance_group)

    variance_tolerance = compute_variance_percentage(title, df.transpose())
    if variance_tolerance > tolerance_max:
        utils.do_print(mode, utils.Levels.ERROR,
                       "%-12s : Group's variance is too important : %7.2f%% "
                       "of %7.2f whereas limit is set to %3.2f%%",
                       title, variance_tolerance, mean_group, tolerance_max)
        utils.do_print(mode, utils.Levels.ERROR,
                       "%-12s : Group performance : UNSTABLE", title)
        for host in df.columns:
            if host not in curious:
                unstable.append(host)
    else:
        curious_performance = False
        for host in df.columns:
            if ("loops_per_sec" in mode) or ("bogomips" in mode):
                mean_host = df[host][title].mean()
            else:
                mean_host = df[host].mean()
            # If the variance is very low, don't try to find the black sheep
            if variance_tolerance > tolerance_min:
                if mean_host > max_group:
                    curious_performance = True
                    utils.do_print(
                        mode, utils.Levels.WARNING,
                        "%-12s : %s : Curious overperformance  %7.2f : "
                        "min_allow_group = %.2f, mean_group = %.2f "
                        "max_allow_group = %.2f",
                        title, host, mean_host, min_group, mean_group,
                        max_group)
                    if host not in curious:
                        curious.append(host)
                        if host in consistent:
                            consistent.remove(host)
                elif mean_host < min_group:
                    curious_performance = True
                    utils.do_print(
                        mode, utils.Levels.WARNING,
                        "%-12s : %s : Curious underperformance %7.2f : "
                        "min_allow_group = %.2f, mean_group = %.2f "
                        "max_allow_group = %.2f",
                        title, host, mean_host, min_group, mean_group,
                        max_group)
                    if host not in curious:
                        curious.append(host)
                        if host in consistent:
                            consistent.remove(host)
                else:
                    if (host not in consistent) and (host not in curious):
                        consistent.append(host)
            else:
                if (host not in consistent) and (host not in curious):
                    consistent.append(host)

        unit = " "
        if "Effi." in title:
            unit = "%"
        if curious_performance is False:
            utils.do_print(mode, utils.Levels.INFO,
                           "%-12s : Group performance = %7.2f %s : CONSISTENT",
                           title, mean_group, unit)
        else:
            utils.do_print(mode, utils.Levels.WARNING,
                           "%-12s : Group performance = %7.2f %s : SUSPICIOUS",
                           title, mean_group, unit)
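
A minimal sketch of driving print_perf directly, assuming utils and compute_variance_percentage are importable; two hypothetical hosts measured on the 1M block size, with the same tolerances (1% and 7%) that memory_perf passes:

from pandas import DataFrame, Series

consistent, curious, unstable = [], [], []
df = DataFrame({
    "host-a": Series([1200.0], index=["1M"]),
    "host-b": Series([1198.4], index=["1M"]),
})
print_perf(1, 7, df.transpose()["1M"], df, "Memory benchmark 1M", "1M",
           consistent, curious, unstable)
# With means this close, both hosts should end up in the consistent list.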
Example no. 4
from pandas import DataFrame, Series

# search_item, print_perf, prepare_detail, print_detail, print_summary and
# utils are project-local helpers of this benchmark suite.
import utils


def memory_perf(systems,
                unique_id,
                group_number,
                detail_options,
                rampup_value=0,
                current_dir=""):
    have_memory_data = False
    modes = ['1K', '4K', '1M', '16M', '128M', '256M', '1G', '2G']
    sets = search_item(systems, unique_id, "cpu", "(.*)", [], modes)
    for mode in modes:
        real_mode = "Memory benchmark %s" % mode
        results = {}
        threaded_perf = dict()
        forked_perf = dict()
        for system in sets:
            memory = []
            series = []
            found_data = ""
            threaded_perf[system] = 0
            forked_perf[system] = 0
            for perf in sets[system]:
                if (mode in perf[2]):
                    # We shall split individual cpu benchmarking from the global one
                    if ("logical_" in perf[1]) and (("bandwidth_%s" % mode)
                                                    in perf[2]):
                        if (not perf[1] in memory):
                            memory.append(perf[1])
                        series.append(float(perf[3]))
                    elif ("threaded_bandwidth_%s" % mode) in perf[2]:
                        threaded_perf[system] = float(perf[3])
                        found_data = float(perf[3])
                    elif ("forked_bandwidth_%s" % mode) in perf[2]:
                        forked_perf[system] = float(perf[3])
                        found_data = float(perf[3])

            if found_data:
                # If no series are populated, it means that a single "All CPU" run was done
                # If so, let's create a single run value
                if not series:
                    series.append(found_data)
                    memory.append("logical")

            results[system] = Series(series, index=memory)

        # No need to continue if no Memory data in this benchmark
        if not results:
            continue

        consistent = []
        curious = []
        unstable = []
        details = []
        matched_category = ''

        df = DataFrame(results)
        for memory in df.transpose().columns:
            if have_memory_data is False:
                print()
                print("Group %d : Checking Memory perf" % group_number)
                have_memory_data = True

            print_perf(1, 7,
                       df.transpose()[memory], df, real_mode, memory,
                       consistent, curious, unstable, rampup_value,
                       current_dir)
            matched_category = []
            prepare_detail(detail_options, group_number, mode, memory, details,
                           matched_category)

        print_detail(detail_options, details, df, matched_category)
        print_summary(mode, consistent, "consistent", "MB/s", df)
        print_summary(mode, curious, "curious", "MB/s", df)
        print_summary(mode, unstable, "unstable", "MB/s", df)

        for bench_type in ["threaded", "forked"]:
            efficiency = {}
            have_forked_or_threaded = False
            if ("threaded" in bench_type):
                mode_text = "Thread effi."
            else:
                mode_text = "Forked Effi."
            for system in sets:
                host_efficiency_full_load = []
                host_perf = df[system].sum()
                if (host_perf > 0 and threaded_perf[system] > 0 and
                        forked_perf[system] > 0):
                    have_forked_or_threaded = True
                    if ("threaded" in bench_type):
                        host_efficiency_full_load.append(
                            threaded_perf[system] / host_perf * 100)
                    else:
                        host_efficiency_full_load.append(forked_perf[system] /
                                                         host_perf * 100)

                    efficiency[system] = Series(host_efficiency_full_load,
                                                index=[mode_text])

            details = []
            memory_eff = DataFrame(efficiency)
            if have_forked_or_threaded is True:
                consistent = []
                curious = []
                unstable = []

                for memory in memory_eff.transpose().columns:
                    print_perf(2, 10,
                               memory_eff.transpose()[memory], memory_eff,
                               real_mode, memory, consistent, curious,
                               unstable)
                    matched_category = []
                    prepare_detail(detail_options, group_number, mode, memory,
                                   details, matched_category)

                # Let's pad when it's a thread or forked efficiency,
                # in addition to the block size
                if matched_category:
                    matched_category[0] += " " + mode_text

                print_detail(detail_options, details, memory_eff,
                             matched_category)
                print_summary(mode + " " + mode_text, consistent, "consistent",
                              "%", memory_eff)
                print_summary(mode + " " + mode_text, curious, "curious", "%",
                              memory_eff)
                print_summary(mode + " " + mode_text, unstable, "unstable",
                              "%", memory_eff)
            else:
                utils.do_print(real_mode, utils.Levels.WARNING,
                               "%-12s : Benchmark not run on this group",
                               mode_text)
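
The threaded/forked efficiency computed above is the global-run bandwidth divided by the sum of the per-CPU runs; a worked example with invented numbers:

host_perf = 1200.0 + 1195.5   # sum of the per-CPU bandwidth_1M samples
threaded = 2150.0             # threaded_bandwidth_1M global run
efficiency = threaded / host_perf * 100
print("%.1f%%" % efficiency)  # 89.8%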
Example no. 5
# utils, compute_deviance_percentage and write_gnuplot_file are project-local
# helpers of this benchmark suite.
import utils


def print_perf(tolerance_min,
               tolerance_max,
               item,
               df,
               mode,
               title,
               consistent=None,
               curious=None,
               unstable=None,
               rampup_value=0,
               current_dir=""):
    # tolerance_min is the variance percentage below which hosts are not
    # individually flagged; tolerance_max is the maximum variance percentage,
    # relative to the group mean, before the whole group is marked unstable.

    variance_group = item.std()  # standard deviation (pandas uses ddof=1)
    mean_group = item.mean()
    sum_group = item.sum()
    min_group = mean_group - 2 * variance_group
    max_group = mean_group + 2 * variance_group

    utils.do_print(
        mode, utils.Levels.INFO,
        "%-12s : Group performance : min=%8.2f, mean=%8.2f, max=%8.2f, stddev=%8.2f",
        title, item.min(), mean_group, item.max(), variance_group)

    variance_tolerance = compute_deviance_percentage(title, df.transpose())

    if (rampup_value > 0) and (current_dir):
        utils.write_gnuplot_file(current_dir + "/deviance.plot", rampup_value,
                                 variance_group)
        utils.write_gnuplot_file(current_dir + "/deviance_percentage.plot",
                                 rampup_value, variance_tolerance)
        utils.write_gnuplot_file(current_dir + "/mean.plot", rampup_value,
                                 mean_group)
        utils.write_gnuplot_file(current_dir + "/sum.plot", rampup_value,
                                 sum_group)

    if (variance_tolerance > tolerance_max):
        utils.do_print(
            mode, utils.Levels.ERROR,
            "%-12s : Group's variance is too important : %7.2f%% of %7.2f whereas limit is set to %3.2f%%",
            title, variance_tolerance, mean_group, tolerance_max)
        utils.do_print(mode, utils.Levels.ERROR,
                       "%-12s : Group performance : UNSTABLE", title)
        for host in df.columns:
            if host not in curious:
                unstable.append(host)
    else:
        curious_performance = False
        for host in df.columns:
            if (("loops_per_sec") in mode) or ("bogomips" in mode):
                mean_host = df[host][title].mean()
            else:
                mean_host = df[host].mean()
            # If the variance is very low, don't try to find the black sheep
            if (variance_tolerance > tolerance_min):
                if (mean_host > max_group):
                    curious_performance = True
                    utils.do_print(
                        mode, utils.Levels.WARNING,
                        "%-12s : %s : Curious overperformance  %7.2f : min_allow_group = %.2f, mean_group = %.2f max_allow_group = %.2f",
                        title, host, mean_host, min_group, mean_group,
                        max_group)
                    if host not in curious:
                        curious.append(host)
                        if host in consistent:
                            consistent.remove(host)
                elif (mean_host < min_group):
                    curious_performance = True
                    utils.do_print(
                        mode, utils.Levels.WARNING,
                        "%-12s : %s : Curious underperformance %7.2f : min_allow_group = %.2f, mean_group = %.2f max_allow_group = %.2f",
                        title, host, mean_host, min_group, mean_group,
                        max_group)
                    if host not in curious:
                        curious.append(host)
                        if host in consistent:
                            consistent.remove(host)
                else:
                    if (host not in consistent) and (host not in curious):
                        consistent.append(host)
            else:
                if (host not in consistent) and (host not in curious):
                    consistent.append(host)

        unit = " "
        if "Effi." in title:
            unit = "%"
        if curious_performance is False:
            utils.do_print(
                mode, utils.Levels.INFO,
                "%-12s : Group performance = %7.2f %s : CONSISTENT", title,
                mean_group, unit)
        else:
            utils.do_print(
                mode, utils.Levels.WARNING,
                "%-12s : Group performance = %7.2f %s : SUSPICIOUS", title,
                mean_group, unit)
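
The min_group/max_group band used above is mean ± 2 standard deviations; a quick numeric illustration of the band a host's mean must leave before it is flagged as curious (samples invented):

import numpy

samples = numpy.array([100.0, 102.0, 98.0, 101.0])
mean = samples.mean()      # 100.25
band = 2 * samples.std()   # ~2.96 (numpy.std defaults to ddof=0)
print(mean - band, mean + band)  # hosts outside this range are "curious"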
Example no. 6
from pandas import DataFrame, Series

# search_item, print_perf, prepare_detail, print_detail, print_summary and
# utils are project-local helpers of this benchmark suite.
import utils


def memory_perf(systems, group_number, detail_options):
    print("Group %d : Checking Memory perf" % group_number)
    modes = ['1K', '4K', '1M', '16M', '128M', '1G', '2G']
    sets = search_item(systems, "cpu", "(.*)", [], modes)
    for mode in modes:
        real_mode = "Memory benchmark %s" % mode
        results = {}
        threaded_perf = dict()
        forked_perf = dict()
        for system in sets:
            memory = []
            series = []
            threaded_perf[system] = 0
            forked_perf[system] = 0
            for perf in sets[system]:
                if (mode in perf[2]):
                    # We shall split individual cpu benchmarking from the global one
                    if ("logical_" in perf[1]) and (("bandwidth_%s" % mode)
                                                    in perf[2]):
                        if (not perf[1] in memory):
                            memory.append(perf[1])
                        series.append(float(perf[3]))
                    elif ("threaded_bandwidth_%s" % mode) in perf[2]:
                        threaded_perf[system] = float(perf[3])
                    elif ("forked_bandwidth_%s" % mode) in perf[2]:
                        forked_perf[system] = float(perf[3])
            results[system] = Series(series, index=memory)

        consistent = []
        curious = []
        unstable = []
        details = []
        matched_category = ''

        df = DataFrame(results)
        for memory in df.transpose().columns:
            print_perf(1, 7,
                       df.transpose()[memory], df, real_mode, memory,
                       consistent, curious, unstable)
            matched_category = []
            prepare_detail(detail_options, group_number, mode, memory, details,
                           matched_category)

        print_detail(detail_options, details, df, matched_category)
        print_summary(mode, consistent, "consistent", "MB/s", df)
        print_summary(mode, curious, "curious", "MB/s", df)
        print_summary(mode, unstable, "unstable", "MB/s", df)

        for bench_type in ["threaded", "forked"]:
            efficiency = {}
            have_forked_or_threaded = False
            if ("threaded" in bench_type):
                mode_text = "Thread effi."
            else:
                mode_text = "Forked Effi."
            for system in sets:
                host_efficiency_full_load = []
                host_perf = df[system].sum()
                if (host_perf > 0 and threaded_perf[system] > 0 and
                        forked_perf[system] > 0):
                    have_forked_or_threaded = True
                    if ("threaded" in bench_type):
                        host_efficiency_full_load.append(
                            threaded_perf[system] / host_perf * 100)
                    else:
                        host_efficiency_full_load.append(forked_perf[system] /
                                                         host_perf * 100)

                    efficiency[system] = Series(host_efficiency_full_load,
                                                index=[mode_text])

            details = []
            memory_eff = DataFrame(efficiency)
            if have_forked_or_threaded is True:
                consistent = []
                curious = []
                unstable = []
                print_perf(2, 10,
                           memory_eff.transpose()[mode_text], memory_eff,
                           real_mode, mode_text, consistent, curious, unstable)
                matched_category = []
                prepare_detail(detail_options, group_number, mode, mode_text,
                               details, matched_category)
                print_detail(detail_options, details, memory_eff,
                             matched_category)
                print_summary(mode + " " + mode_text, consistent, "consistent",
                              "MB/s", memory_eff)
                print_summary(mode + " " + mode_text, curious, "curious",
                              "MB/s", memory_eff)
                print_summary(mode + " " + mode_text, unstable, "unstable",
                              "MB/s", memory_eff)
            else:
                utils.do_print(real_mode, utils.Levels.WARNING,
                               "%-12s : Benchmark not run on this group",
                               mode_text)
        print()
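
For reference, the summary line that print_summary emits has this layout (colors omitted, numbers invented):

print("%3d %-10s hosts with %8.2f %-4s as average value and "
      "%8.2f standard deviation" % (2, "consistent", 1203.85, "MB/s", 4.35))
# ->   2 consistent hosts with  1203.85 MB/s as average value and     4.35 standard deviation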
Example no. 7
from pandas import DataFrame, Series

# search_item, print_perf, prepare_detail, print_detail, print_summary and
# utils are project-local helpers of this benchmark suite.
import utils


def memory_perf(systems, unique_id, group_number, detail_options,
                rampup_value=0, current_dir=""):
    have_memory_data = False
    modes = ['1K', '4K', '1M', '16M', '128M', '256M', '1G', '2G']
    sets = search_item(systems, unique_id, "cpu", "(.*)", [], modes)
    for mode in modes:
        real_mode = "Memory benchmark %s" % mode
        results = {}
        threaded_perf = dict()
        forked_perf = dict()
        for system in sets:
            memory = []
            series = []
            found_data = ""
            threaded_perf[system] = 0
            forked_perf[system] = 0
            for perf in sets[system]:
                if mode in perf[2]:
                    # We shall split individual cpu benchmarking from
                    # the global one
                    if ("logical_" in perf[1] and
                            ("bandwidth_%s" % mode) in perf[2]):
                        if not perf[1] in memory:
                            memory.append(perf[1])
                        series.append(float(perf[3]))
                    elif ("threaded_bandwidth_%s" % mode) in perf[2]:
                        threaded_perf[system] = float(perf[3])
                        found_data = float(perf[3])
                    elif ("forked_bandwidth_%s" % mode) in perf[2]:
                        forked_perf[system] = float(perf[3])
                        found_data = float(perf[3])

            if found_data:
                # If no series are populated, it means that a single "All CPU"
                # run was done
                # If so, let's create a single run value
                if not series:
                    series.append(found_data)
                    memory.append("logical")

            results[system] = Series(series, index=memory)

        # No need to continue if no Memory data in this benchmark
        if not results:
            continue

        consistent = []
        curious = []
        unstable = []
        details = []
        matched_category = ''

        df = DataFrame(results)
        for memory in df.transpose().columns:
            if have_memory_data is False:
                print()
                print("Group %d : Checking Memory perf" % group_number)
                have_memory_data = True

            print_perf(1, 7, df.transpose()[memory], df, real_mode, memory,
                       consistent, curious, unstable, rampup_value,
                       current_dir)
            matched_category = []
            prepare_detail(detail_options, group_number, mode, memory,
                           details, matched_category)

        print_detail(detail_options, details, df, matched_category)
        print_summary(mode, consistent, "consistent", "MB/s", df)
        print_summary(mode, curious, "curious", "MB/s", df)
        print_summary(mode, unstable, "unstable", "MB/s", df)

        for bench_type in ["threaded", "forked"]:
            efficiency = {}
            have_forked_or_threaded = False
            if "threaded" in bench_type:
                mode_text = "Thread effi."
            else:
                mode_text = "Forked Effi."
            for system in sets:
                host_efficiency_full_load = []
                host_perf = df[system].sum()
                if (host_perf > 0 and threaded_perf[system] > 0 and
                        forked_perf[system] > 0):
                    have_forked_or_threaded = True
                    if "threaded" in bench_type:
                        host_efficiency_full_load.append(
                            threaded_perf[system] / host_perf * 100)
                    else:
                        host_efficiency_full_load.append(
                            forked_perf[system] / host_perf * 100)

                    efficiency[system] = Series(host_efficiency_full_load,
                                                index=[mode_text])

            details = []
            memory_eff = DataFrame(efficiency)
            if have_forked_or_threaded is True:
                consistent = []
                curious = []
                unstable = []

                for memory in memory_eff.transpose().columns:
                    print_perf(2, 10, memory_eff.transpose()[memory],
                               memory_eff, real_mode, memory, consistent,
                               curious, unstable)
                    matched_category = []
                    prepare_detail(detail_options, group_number, mode,
                                   memory, details, matched_category)

                # Let's pad when it's a thread or forked efficiency,
                # in addition to the block size
                if matched_category:
                    matched_category[0] += " " + mode_text

                print_detail(detail_options, details, memory_eff,
                             matched_category)
                print_summary(mode + " " + mode_text, consistent,
                              "consistent", "%", memory_eff)
                print_summary(mode + " " + mode_text, curious,
                              "curious", "%", memory_eff)
                print_summary(mode + " " + mode_text, unstable,
                              "unstable", "%", memory_eff)
            else:
                utils.do_print(real_mode, utils.Levels.WARNING,
                               "%-12s : Benchmark not run on this group",
                               mode_text)
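
A small sketch of the single "All CPU" fallback introduced in this version: when only a global threaded or forked run exists, it is recorded as one "logical" sample so the host still gets a row in the DataFrame (values invented):

from pandas import Series

series, memory = [], []
found_data = 2450.7  # hypothetical forked_bandwidth_1M reading
if found_data:
    if not series:   # no per-CPU samples were collected
        series.append(found_data)
        memory.append("logical")
print(Series(series, index=memory))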