Example #1
0
def main2():
    """Load the raw TimeseriesArray for *datestring* and print a quick summary.

    Prints the number of index keys, the first key, its Timeseries, and the
    stats entry for the first key of the derived TimeseriesArrayStats.
    Relies on module-level ``datalogger`` and ``datestring``.
    """
    tsa = datalogger.load_tsa_raw(datestring)
    print(len(tsa.keys()))
    # dict views are not indexable in Python 3 -- take the first key via iter()
    first_key = next(iter(tsa.keys()))
    print(first_key)
    print(tsa[first_key])
    tsastat = TimeseriesArrayStats(tsa)
    print(tsastat[next(iter(tsastat.keys()))])
Example #2
0
def main3():
    """Load the TimeseriesArray for *datestring* via indexing and print a summary.

    Also prints the timeseries and stats for one hard-coded example index key.
    Relies on module-level ``datalogger`` and ``datestring``.
    """
    tsa = datalogger[datestring]
    print(len(tsa.keys()))
    # dict views are not indexable in Python 3 -- take the first key via iter()
    first_key = next(iter(tsa.keys()))
    print(first_key)
    print(tsa[first_key])
    tsastat = TimeseriesArrayStats(tsa)
    print(tsastat[next(iter(tsastat.keys()))])
    print(tsa[("ucsfib-sr1-1-mgmt0","port-channel1304","propVirtual")]["ifOutOctets"])
    print(tsastat[("ucsfib-sr1-1-mgmt0","port-channel1304","propVirtual")])
Example #3
0
def get_total_stats(datalogger, tsa, keyfunc):
    """Render wiki markup with total (fully aggregated) statistics.

    parameters:
    datalogger -- provides .tablename for the section headings
    tsa <TimeseriesArray>
    keyfunc -- key formatting callable forwarded to get_wiki_dict_table

    returns:
    <str> wiki markup
    """
    wikitext = "---+ Total-Data Statistics\n"
    # grouping over the empty index tuple collapses everything into one series;
    # it does not depend on value_key, so compute it once outside the loop
    grouped_tsa = tsa.get_group_by_tsa((), group_func=sum)
    tsa_stats = TimeseriesArrayStats(grouped_tsa)
    for value_key in tsa.get_value_keys():
        wikitext += "---++ %s Total Statistics of %s\n" % (datalogger.tablename, value_key)
        wikitext += "This table is a summation of all data for field using %s\n" % "lambda a: sum(a)"
        wikitext += get_wiki_dict_table(tsa_stats.get_stats(value_key), keyfunc)
    return wikitext
Example #4
0
def get_ungrouped_stats(datalogger, tsa, keyfunc):
    """Render wiki markup with per-index-combination (ungrouped) statistics.

    parameters:
    datalogger -- provides .tablename for the section headings
    tsa <TimeseriesArray>
    keyfunc -- key formatting callable forwarded to get_wiki_dict_table

    returns:
    <str> wiki markup
    """
    wikitext = "---+ Ungrouped-Data Statistics\n"
    wikitext += "Here are some statistical breakdowns for every index combination\n"
    # the stats object does not depend on value_key -- build it once,
    # not on every loop iteration
    tsa_stats = TimeseriesArrayStats(tsa)
    for value_key in tsa.get_value_keys():
        wikitext += "---++ %s Statistics of %s\n" % (datalogger.tablename, value_key)
        stat_dict = tsa_stats.get_stats(value_key)
        wikitext += get_wiki_dict_table(stat_dict, keyfunc)
    return wikitext
Example #5
0
def get_grouped_stats(datalogger, tsa, keyfunc):
    """Render wiki markup with statistics grouped by each single index key.

    For every index key a "group by" aggregation (sum) is computed, then one
    table per value key is emitted -- roughly
    ``select sum(value_key) from ... group by index_key`` in SQL terms.

    parameters:
    datalogger -- provides .tablename for the section headings
    tsa <TimeseriesArray>
    keyfunc -- key formatting callable forwarded to get_wiki_dict_table

    returns:
    <str> wiki markup
    """
    wikitext = "---+ Grouped-Data Statistics\n"
    wikitext += "These statistics are grouped by some index_key, in sql something like select sum(value_key) from ... group by index_key\n"
    for subkey in tsa.get_index_keys():
        wikitext += "---++ Grouped by %s\n" % subkey
        # the grouping depends only on subkey, not on value_key --
        # compute it once per subkey instead of once per table
        grouped_tsa = tsa.get_group_by_tsa((subkey,), group_func=sum)
        tsa_stats = TimeseriesArrayStats(grouped_tsa)
        for value_key in tsa.get_value_keys():
            wikitext += "---+++ %s Statistics of %s grouped by %s\n" % (datalogger.tablename, value_key, subkey)
            wikitext += "This table is grouped by %s field using %s\n" % (subkey, "lambda a: sum(a)")
            wikitext += get_wiki_dict_table(tsa_stats.get_stats(value_key), keyfunc)
    return wikitext
Example #6
0
def groupby(tsastat, index_keys):
    """
    group tsastat by index_keys, which are a subset of the original index_keys

    the grouping functions are predefined, it makes no sense to make this variable

    parameters:
    tsastat <TimeseriesArrayStats>
    index_keys <tuple>

    returns:
    <TimeseriesArrayStats> or None if index_keys is invalid
    """
    # how to combine two already-computed statistics of the same name;
    # NOTE(review): avg/std/median/mean/last/first use a pairwise mean,
    # which is only an approximation of the true combined statistic
    group_funcs = {
        "sum" : lambda a, b: a + b,
        "avg" : lambda a, b: (a + b) / 2,
        "min" : min,
        "max" : max,
        "count" : lambda a, b: a + b,
        "std" : lambda a, b: (a + b) / 2,
        "median" : lambda a, b: (a + b) / 2,
        "mean" : lambda a, b: (a + b) / 2,
        "last" : lambda a, b: (a + b) / 2,
        "first" : lambda a, b: (a + b) / 2,
    }
    # validate explicitly -- `assert` would be stripped under `python -O`
    if not all(index_key in tsastat.index_keys for index_key in index_keys):
        logging.error("All given index_keys have to be in tsastat.index_keys")
        return None
    # intermediate data: group_key -> {value_key -> {stat_func -> value}}
    data = {}
    for key in tsastat.keys():
        key_dict = dict(zip(tsastat.index_keys, key))
        group_key = tuple(key_dict[index_key] for index_key in index_keys)
        if group_key not in data:
            # copy the nested stat dicts: the values are merged in place
            # below, and aliasing tsastat[key].stats directly would
            # silently corrupt the caller's tsastat
            data[group_key] = dict(
                (value_key, dict(funcs))
                for value_key, funcs in tsastat[key].stats.items()
            )
        else:
            # there is something to group
            for value_key in tsastat[key].keys():
                for stat_func, value in tsastat[key][value_key].items():
                    # group values by function
                    grouped_value = group_funcs[stat_func](value, data[group_key][value_key][stat_func])
                    # store
                    data[group_key][value_key][stat_func] = grouped_value
    # get to same format as TimeseriesArrayStats.to_json returns
    outdata = [tsastat.index_keys, tsastat.value_keys, ]
    outdata.append([(key, json.dumps(value)) for key, value in data.items()])
    # use TimeseriesArrayStats.from_json to get to TimeseriesArrayStats
    # object
    new_tsastat = TimeseriesArrayStats.from_json(json.dumps(outdata))
    return new_tsastat
def _dump_cpu_report(tsastat_g, filepath):
    """Write one grouped cpu report (header row + one row per key) as JSON to *filepath*."""
    data = [tsastat_g.index_keynames + tsastat_g.value_keynames]
    for key in tsastat_g.keys():
        data.append(key + (
            "%0.2f" % tsastat_g[key]["cpu.idle.summation"]["min"],
            "%0.2f" % tsastat_g[key]["cpu.used.summation"]["avg"],
            "%0.2f" % tsastat_g[key]["cpu.used.summation"]["max"],
        ))
    # close the file deterministically -- json.dump(data, open(...)) leaked the handle
    with open(filepath, "w") as outfile:
        json.dump(data, outfile)

def sr_report_unused_cpu(datestring):
    """Generate special reports about unused cpu resources for *datestring*.

    Extends the loaded tsastats index with esxhost/cluster information
    (looked up via the module-level ``webapp``) and dumps three grouped JSON
    reports (by hostname, by cluster, by esxhost) into the
    ``__special_reports`` cache directory.
    Relies on module-level ``datalogger`` and ``webapp``.
    """
    special_reports_dir = os.path.join(datalogger.global_cachedir, datestring, "__special_reports")
    if not os.path.exists(special_reports_dir):
        os.mkdir(special_reports_dir)
    tsastat = datalogger.load_tsastats(datestring)
    # destination tsastat, like original but extended index_keynames;
    # __new__ is used to skip the regular constructor on purpose
    tsastats_new = TimeseriesArrayStats.__new__(TimeseriesArrayStats)
    tsastats_new.index_keys = tsastat.index_keys + ("esxhost", "cluster")
    tsastats_new.value_keys = tsastat.value_keys
    tsastats_new.stats = {}
    for key, stats in tsastat.stats.items():
        key_dict = dict(zip(tsastat.index_keys, key))
        vm_moref = webapp.get_vm_moref_by_name(key_dict["hostname"])
        host_moref = webapp.get_host_by_vm(vm_moref)
        # try to get ESX Server
        try:
            key_dict["esxhost"] = webapp.get_name_by_moref(host_moref[0])
        except KeyError:
            key_dict["esxhost"] = "not found"
        # try to get cluster
        try:
            key_dict["cluster"] = webapp.get_cluster_by_vm(vm_moref)
        except KeyError:
            key_dict["cluster"] = "no cluster"
        new_index_key = tuple(key_dict[index_key] for index_key in tsastats_new.index_keys)
        tsastats_new.stats[new_index_key] = stats
    # one report per grouping -- the dump logic is identical, only the
    # group keys and target filename differ
    for group_keys, filename in (
            (("hostname", "cluster", "esxhost"), "sr_unused_cpu_by_hostname.json"),
            (("cluster", ), "sr_unused_cpu_by_cluster.json"),
            (("esxhost", "cluster"), "sr_unused_cpu_by_esxhost.json"),
    ):
        tsastat_g = datalogger.tsastat_group_by(tsastats_new, group_keys)
        _dump_cpu_report(tsastat_g, os.path.join(special_reports_dir, filename))
Example #8
0
    def get_tsastats(self, project, tablename, datestring):
        """
        fetch the TimeseriesArrayStats object for one
        project/tablename/datestring combination from the backend

        parameters:
        project <str>
        tablename <str>
        datestring <str>

        returns:
        <TimeseriesArrayStats>
        """
        # URI parameters identify the dataset; no extra query parameters needed
        uri_params = {
            "project" : project,
            "tablename" : tablename,
            "datestring" : datestring,
        }
        raw = self.__get_raw_data("get_tsastats", uri_params, {})
        # the backend delivers the TimeseriesArrayStats JSON serialization
        return TimeseriesArrayStats.from_json(raw)