Example #1
    if baseline:
        b /= baseline
    return b


if __name__ == "__main__":

    # data acquisition
    # we are recording double values
    nbproj, nbsage, nbipynb = [float(_) for _ in running_process_stats()]

    # constructing timeseries
    data = []
    # naming is a bit unfortunate: this should be "instances" with kind="projects",
    # "sagemath", ... but don't change it, dependencies are all over the place
    data.append(make_data("nb_projects", nbproj, kind="single",
                          host=HOST))  ##, logfile=logfile))
    data.append(make_data("nb_sagemath", nbsage, kind="single",
                          host=HOST))  ##, logfile=logfile))
    data.append(make_data("nb_ipynb", nbipynb, kind="single",
                          host=HOST))  ##, logfile=logfile))

    # only run benchmarks every 5 minutes
    if datetime.utcnow().minute % 5 == 0:
        benchmark_cpu = benchmark_repeat("cpu", baseline=0.0642)
        benchmark_mem = benchmark_repeat("memory", baseline=0.0549)
        benchmark_threads = benchmark_repeat("threads", baseline=0.0452)
        benchmark_fs = benchmark_repeat("filesystem", baseline=0.0636818408966)

        data.append(
            make_data("benchmark", benchmark_cpu, kind="cpu", host=HOST))
        data.append(
            make_data("benchmark", benchmark_mem, kind="memory", host=HOST))
        data.append(
            make_data("benchmark", benchmark_threads, kind="threads",
                      host=HOST))
        data.append(
            make_data("benchmark", benchmark_fs, kind="filesystem",
                      host=HOST))
Example #2
        sleep(0.02)
    b = filter_op(times)
    if baseline:
        b /= baseline
    return b

if __name__ == "__main__":

    # data acquisition
    nbproj, nbsage, nbipynb = [float(_) for _ in running_process_stats()] # we are recording double values

    # constructing timeseries
    data = []
    # naming is a bit unfortunate: this should be "instances" with kind="projects",
    # "sagemath", ... but don't change it, dependencies are all over the place
    data.append(make_data("nb_projects", nbproj,  kind="single", host=HOST)) ##, logfile=logfile))
    data.append(make_data("nb_sagemath", nbsage,  kind="single", host=HOST)) ##, logfile=logfile))
    data.append(make_data("nb_ipynb",    nbipynb, kind="single", host=HOST)) ##, logfile=logfile))

    # only run benchmarks every 5 minutes
    if datetime.utcnow().minute % 5 == 0:
        benchmark_cpu      = benchmark_repeat("cpu",        baseline=0.0642)
        benchmark_mem      = benchmark_repeat("memory",     baseline=0.0549)
        benchmark_threads  = benchmark_repeat("threads",    baseline=0.0452)
        benchmark_fs       = benchmark_repeat("filesystem", baseline=0.0636818408966)

        data.append(make_data("benchmark",   benchmark_cpu,      kind="cpu",        host=HOST))
        data.append(make_data("benchmark",   benchmark_mem,      kind="memory",     host=HOST))
        data.append(make_data("benchmark",   benchmark_threads,  kind="threads",    host=HOST))
        data.append(make_data("benchmark",   benchmark_fs,       kind="filesystem", host=HOST))
Example #3
        concurrent = max(concs) if concs else 0
        modified = float(sum(modifs)) if modifs else 0.0
        ms_median = median(mss) if mss else 0.0
        ms_max = float(max(mss)) if mss else 0.0
        ms_sum = float(sum(mss)) if mss else 0.0
        blocked = float(max(blocked)) if blocked else 0.0
        cf_max = float(max(cfs)) if cfs else None
        msgtimes_max = float(max(msgtimes)) if msgtimes else None

        #print("query median in ms: %s" % ms_median)
        #print("query max in ms: %s" % ms_max)

        logfile = os.path.basename(fn)

        data.append(
            make_data("hub_concurrent", concurrent, host=HOST,
                      logfile=logfile))
        data.append(
            make_data("hub_query_ms",
                      ms_median,
                      kind="median",
                      host=HOST,
                      logfile=logfile))
        data.append(
            make_data("hub_query_ms",
                      ms_max,
                      kind="max",
                      host=HOST,
                      logfile=logfile))
        data.append(
            make_data("hub_query_ms",
                      ms_sum,
                      kind="sum",
                      host=HOST,
                      logfile=logfile))
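Examples #3 and #5 reduce the parsed log values to scalar aggregates (median,
max, sum) before turning them into data points. The median helper itself is
not shown; a reasonable stand-in is the stdlib statistics.median (an
assumption -- the original may use numpy or its own helper):

from statistics import median

mss = [12.5, 3.1, 48.0]  # hypothetical per-query times in ms
print(median(mss))       # 12.5
print(float(max(mss)))   # 48.0
print(float(sum(mss)))   # 63.6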
Example #4
def rsyncs():
    # reconstructed header (see Example #5): sample the number of running
    # rsync processes once a second for ten seconds
    rsyncs = []
    t0 = time()
    while t0 + 10 > time():
        try:
            rsync = check_output(["pgrep", "-xc", "rsync"])
        except CalledProcessError as cpe:
            rsync = cpe.output
        rsyncs.append(int(rsync))
        sleep(1.0)

    # another idea is to use numpy.median, but "max" is probably more interesting
    # type must be float here!
    rsyncs = float(max(rsyncs))
    return rsyncs


if __name__ == "__main__":

    # data acquisition
    rsyncs = float(rsyncs()) # we are recording double values

    # constructing timeseries
    data = []
    data.append(make_data("concurrent_rsyncs", rsyncs, kind="max", host=HOST)) ##, logfile=logfile))

    # submit everything at once, because there is a daily quota limit of the API
    submit_data(*data)


#cmd = '/usr/bin/python $HOME/monitor_rsyncs/record_metric.py concurrent_rsyncs host=$(hostname) %f' % rsyncs
#print(cmd)
#os.system(cmd)
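The counting idiom above leans on a pgrep quirk worth spelling out: pgrep -x
matches the process name exactly and -c prints the match count, but pgrep
exits non-zero when that count is 0, so check_output raises and the "0" has
to be rescued from the exception. A self-contained sketch (the function name
is mine):

from subprocess import check_output, CalledProcessError

def count_processes(name):
    # pgrep prints the count even on zero matches, but then exits 1, which
    # check_output turns into CalledProcessError -- recover the count from
    # the exception's captured output
    try:
        out = check_output(["pgrep", "-xc", name])
    except CalledProcessError as cpe:
        out = cpe.output
    return int(out)

print(count_processes("rsync"))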

Example #5
        concurrent   = max(concs)           if concs else 0
        modified     = float(sum(modifs))   if modifs else 0.0
        ms_median    = median(mss)          if mss else 0.0
        ms_max       = float(max(mss))      if mss else 0.0
        ms_sum       = float(sum(mss))      if mss else 0.0
        blocked      = float(max(blocked))  if blocked else 0.0
        cf_max       = float(max(cfs))      if cfs else None
        msgtimes_max = float(max(msgtimes)) if msgtimes else None

        #print("query median in ms: %s" % ms_median)
        #print("query max in ms: %s" % ms_max)

        logfile = os.path.basename(fn)

        data.append(make_data("hub_concurrent", concurrent,                  host=HOST, logfile=logfile))
        data.append(make_data("hub_query_ms",   ms_median,    kind="median", host=HOST, logfile=logfile))
        data.append(make_data("hub_query_ms",   ms_max,       kind="max",    host=HOST, logfile=logfile))
        data.append(make_data("hub_query_ms",   ms_sum,       kind="sum",    host=HOST, logfile=logfile))
        data.append(make_data("blocked",        blocked,      kind="max",    host=HOST, logfile=logfile))
        if cf_max is not None:
            data.append(make_data("changefeeds",cf_max,       kind="max",    host=HOST, logfile=logfile))
        if msgtimes_max is not None:
            data.append(make_data("mesg_times", msgtimes_max, kind="max",    host=HOST, logfile=logfile))

        # calc query per second
        if ts_first_query is not None and ts_first_query != ts_last_query:
            tdelta = (ts_last_query - ts_first_query).total_seconds()
            qps = float(query_count) / tdelta
            modified_per_min = float(modified) / (tdelta / 60)
            data.append(make_data("hub_queries_per_second", qps, host=HOST, logfile=logfile))


def rsyncs():
    # reconstructed header: the sampling loop below belongs to this helper,
    # which counts concurrent rsync processes once a second for ten seconds
    rsyncs = []
    t0 = time()
    while t0 + 10 > time():
        try:
            rsync = check_output(["pgrep", "-xc", "rsync"])
        except CalledProcessError as cpe:
            rsync = cpe.output
        rsyncs.append(int(rsync))
        sleep(1.0)

    # another idea is to use numpy.median, but "max" is probably more interesting
    # type must be float here!
    rsyncs = float(max(rsyncs))
    return rsyncs


if __name__ == "__main__":

    # data acquisition
    rsyncs = float(rsyncs())  # we are recording double values

    # constructing timeseries
    data = []
    data.append(make_data("concurrent_rsyncs", rsyncs, kind="max",
                          host=HOST))  ##, logfile=logfile))

    # submit everything at once, because there is a daily quota limit of the API
    submit_data(*data)

#cmd = '/usr/bin/python $HOME/monitor_rsyncs/record_metric.py concurrent_rsyncs host=$(hostname) %f' % rsyncs
#print(cmd)
#os.system(cmd)
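None of the examples include make_data() or submit_data() themselves. From
the call sites, make_data(name, value, kind=..., host=..., logfile=...)
evidently builds a single timeseries point and submit_data(*data) ships the
whole batch in one request, to stay under the API's daily quota. A minimal
sketch, assuming a dict-shaped point and a print stand-in for the transport;
the field names are assumptions, not the project's API:

from datetime import datetime

def make_data(name, value, host=None, kind=None, logfile=None):
    # one timeseries point; every value is recorded as a double
    labels = {"host": host}
    if kind is not None:
        labels["kind"] = kind
    if logfile is not None:
        labels["logfile"] = logfile
    return {
        "metric": name,
        "value": float(value),
        "timestamp": datetime.utcnow().isoformat() + "Z",
        "labels": labels,
    }

def submit_data(*data):
    # batched on purpose: one request per run keeps the daily quota happy
    print(data)  # stand-in for the real submission call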