Example #1
def __init__(self, hostname):
    self.hostname = hostname
    self.syn_counter = metrics.new_meter(str(hostname) + COUNTER_SYN)
    self.syn_ack_counter = metrics.new_meter(
        str(hostname) + COUNTER_SYN_ACK)
    self.est_counter = metrics.new_meter(str(hostname) + COUNTER_EST)
    self.resets_counter = metrics.new_meter(str(hostname) + COUNTER_RST)
    self.fin_in_counter = metrics.new_meter(str(hostname) + COUNTER_FIN_IN)
    self.fin_out_counter = metrics.new_meter(
        str(hostname) + COUNTER_FIN_OUT)
    self.connection_time = metrics.new_histogram(
        str(hostname) + HISTOGRAM_CONN)
    self.outgoing_packets = metrics.new_meter(
        str(hostname) + COUNTER_PKT_OUT)
    self.incoming_packets = metrics.new_meter(
        str(hostname) + COUNTER_PKT_IN)
    self.transport_time = metrics.new_histogram(
        str(hostname) + HISTOGRAM_TRANSPORT)
    self.rt_per_conn_counter = metrics.new_histogram(
        str(hostname) + HISTOGRAM_RT_PER_CONN)
    self.pkt_err_counter = metrics.new_counter(
        str(hostname) + COUNTER_PKT_ERR)
    self.retransmits_counter = metrics.new_counter(
        str(hostname) + COUNTER_RTRS)
    self.states = {}
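The COUNTER_* and HISTOGRAM_* names come from outside this excerpt; presumably they are per-metric suffix strings appended to the hostname so that each host gets its own metric family. A hypothetical shape (the suffix values are assumptions, not from the source):

COUNTER_SYN = ".tcp.syn"          # assumed suffix, not from the source
COUNTER_SYN_ACK = ".tcp.syn_ack"
HISTOGRAM_CONN = ".tcp.connection_time"
# ...one suffix constant per metric, following the same pattern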
Example #2
def benchmark_all():
    run("counter", metrics.new_counter, DURATION)
    run("histogram", metrics.new_histogram, DURATION)
    run(
        "histogram-sliding time window",
        lambda name: metrics.new_histogram(name, histogram.SlidingTimeWindowReservoir(5)),
        DURATION)
    run(
        "histogram-sliding window",
        lambda name: metrics.new_histogram(name, histogram.SlidingWindowReservoir()),
        DURATION)
    run(
        "histogram-exponentially decaying",
        lambda name: metrics.new_histogram(name, histogram.ExponentialDecayingReservoir()),
        DURATION)
    run("meter", metrics.new_meter, DURATION)
Example #4
def train_ae(epoch, dataloader, model, optimizer, writer, loss_func, **kwargs):
    # Training pass: training mode, fresh per-epoch loss histogram.
    model.train()
    loss_meter = metrics.new_histogram(f"train_vae_loss_{epoch}")
    device = kwargs.get("device", next(model.parameters()).device)

    total_iters = (
        ceil(len(dataloader.dataset) / dataloader.batch_size)
        if not dataloader.drop_last
        else len(dataloader.dataset) // dataloader.batch_size
    )

    with tqdm(total=total_iters) as pbar:
        for batch_idx, (x, _) in enumerate(dataloader):

            optimizer.zero_grad()
            batch_size = x.size(0)
            x = x.to(device)

            xcap = model(x)
            loss = loss_func(xcap, x) / batch_size
            loss_meter.notify(loss.item())
            loss.backward()
            optimizer.step()

            pbar.set_postfix(avg_ae_loss=f'{loss_meter.get()["arithmetic_mean"]:.3e}')
            pbar.update()

    writer.add_scalar("Loss/AE/train/mean", loss_meter.get()["arithmetic_mean"], epoch)
    writer.add_scalar(
        "Loss/AE/train/std_dev", loss_meter.get()["standard_deviation"], epoch
    )
Example #6
def test_histogram(self, mock_librato, librato_reporter, metric,
                   metric_name, full_metric_name, value):
    histogram = _metrics.new_histogram(metric_name)
    histogram.notify(value)
    percentiles = histogram.get()['percentile']
    self.metric_submission_test(librato_reporter, [
        metric(full_metric_name,
               None,
               count=1,
               sum=value,
               min=value,
               max=value)
    ] + [
        metric('.'.join((full_metric_name, str(p))), value)
        for p, _ in percentiles
    ])
Example #7
def test_histogram():
    @metrics.with_histogram("test1")
    def my_worker():
        time.sleep(random.random())

    # test timing histogram
    my_worker()
    my_worker()
    my_worker()
    logger.debug(metrics.get("test1"))

    # test data histogram
    histogram = metrics.new_histogram("test2")
    histogram.notify(1.0)
    histogram.notify(2.0)
    histogram.notify(3.0)
    logger.debug(histogram.get())
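Because the appmetrics registry is process-global, values recorded here can be read back by name from anywhere. A minimal check, assuming the snippet above has just run:

assert metrics.get("test1")["n"] == 3                   # three timed calls
assert metrics.get("test2")["arithmetic_mean"] == 2.0   # mean of 1.0, 2.0, 3.0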
Example #8
def evaluate_ae(epoch, dataloader, model, writer, loss_func, **kwargs):
    # Evaluation pass: eval mode, no gradients, accumulate per-batch losses.
    model.eval()
    loss_meter = metrics.new_histogram(f"test_vae_loss_{epoch}")
    device = kwargs.get("device", next(model.parameters()).device)

    with torch.no_grad():
        for batch_idx, (x, _) in enumerate(dataloader):

            batch_size = x.size(0)
            x = x.to(device)

            xcap = model(x)
            loss = loss_func(xcap, x) / batch_size
            loss_meter.notify(loss.item())

    writer.add_scalar("Loss/AE/test/mean", loss_meter.get()["arithmetic_mean"], epoch)
    writer.add_scalar(
        "Loss/AE/test/std_dev", loss_meter.get()["standard_deviation"], epoch
    )
Example #9
def train_flow(epoch, dataloader, flow_model, ae_model, optimizer, writer, **kwargs):
    flow_model.train()
    loss_meter = metrics.new_histogram(f"train_flow_loss_{epoch}")
    flatten = kwargs.get("flatten", False)

    total_iters = (
        ceil(len(dataloader.dataset) / dataloader.batch_size)
        if not dataloader.drop_last
        else len(dataloader.dataset) // dataloader.batch_size
    )
    ae_model.eval()
    device = kwargs.get("device", next(ae_model.parameters()).device)

    with tqdm(total=total_iters) as pbar:
        for batch_idx, (x, y) in enumerate(dataloader):

            optimizer.zero_grad()
            batch_size = x.size(0)

            with torch.no_grad():
                x = ae_model.encoder(x.to(device))

            if flatten:
                x = x.view(batch_size, -1)

            z, prior_logprob, log_det = flow_model(x)
            logprob = prior_logprob + log_det
            loss = -torch.mean(logprob)  # negative log-likelihood
            loss_meter.notify(loss.item())
            loss.backward()
            optimizer.step()

            pbar.set_postfix(avg_flow_loss=f'{loss_meter.get()["arithmetic_mean"]:.3e}')
            pbar.update()

    writer.add_scalar(
        "Loss/Flow/train/mean", loss_meter.get()["arithmetic_mean"], epoch
    )
    writer.add_scalar(
        "Loss/Flow/train/std_dev", loss_meter.get()["standard_deviation"], epoch
    )
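train_ae, evaluate_ae, and train_flow all follow one pattern: a fresh appmetrics histogram per epoch (the registry raises DuplicateMetricError on duplicate names, so the f"..._{epoch}" naming keeps repeated epochs safe) accumulates per-batch losses, whose mean and standard deviation are then pushed to TensorBoard. A hypothetical driver loop, with every model, loader, and the writer assumed rather than taken from the source:

for epoch in range(num_epochs):
    train_ae(epoch, train_loader, ae_model, ae_optimizer, writer, loss_func)
    evaluate_ae(epoch, test_loader, ae_model, writer, loss_func)
    train_flow(epoch, train_loader, flow_model, ae_model, flow_optimizer, writer)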
Example #10
#!/usr/bin/env python

import time

from appmetrics import metrics

from pykafka import KafkaClient

DEBUG = 0

## From AppMetrics
reservoir = metrics.histogram.UniformReservoir(1028)
hist = metrics.new_histogram("call-timer", reservoir)
raw_ns = []

client = KafkaClient(hosts="127.0.0.1:9092")

# implicitly creates the topic
#
topic = client.topics["sandbox-topic"]

# Set linger time to 5 ms (the default is 5 s). Messages are batched by
# default, and lingering for 5 s causes a confusing delay at exit.
#
with topic.get_producer(linger_ms=5) as producer:
    for count in range(100000):
        msg = "m-{}".format(count)
        #print(msg)
        start_ns = time.time_ns()
        producer.produce(msg.encode('utf-8'))
        end_ns = time.time_ns()
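The excerpt is cut off inside the loop: start_ns and end_ns are captured but never recorded. Presumably each iteration ends by feeding the sample into the histogram and the raw_ns list declared above, along these lines:

        hist.notify(end_ns - start_ns)   # per-produce latency in nanoseconds
        raw_ns.append(end_ns - start_ns)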
Example #11
def timer(self):
    # Presumably a pytest fixture (the decorator is not shown in the excerpt):
    # registers a histogram under a fresh random name and yields that name.
    name = random_string() + 't'
    new_histogram(name)
    yield name
Example #12
#!/usr/bin/env python3

import random

from appmetrics import metrics

if __name__ == '__main__':
    res = metrics.histogram.UniformReservoir(10288)
    hist = metrics.new_histogram("rand-spread", res)

    for rr in range(10000):
        v = random.random() * 1000
        hist.notify(v)

    print("Histogram:")
    for (k,v) in hist.get().items():
        print("  {}: {}".format(k,v))
    print("")
    for v in hist.get()['histogram']:
        print("  {}".format(v))
Example #13
import random
import time

from appmetrics import metrics


def do_oper():
    if random.random() < 0.1:
        # occasionally (10%) introduce a large variance
        time.sleep(0.1)

    for ii in range(10):
        time.sleep(0.00001)  # 10 uS


if __name__ == "__main__":

    hist = metrics.new_histogram("call_time")

    loops = 100
    for ii in range(loops):
        start_ns = time.time_ns()
        do_oper()
        end_ns = time.time_ns()
        hist.notify(end_ns - start_ns)

    print("Histogram: \n")
    for (k, v) in hist.get().items():
        print("  {}: {}".format(k, v))
Example #14
def register_app_metrics():
    # appmetrics raises DuplicateMetricError when a name is re-registered;
    # the histograms below are guarded against that, the counters are not,
    # so this function is only safe to call once per process.
    metrics.new_counter("received_kafka_messages")
    metrics.new_counter("created_thehive_alerts")
    metrics.new_counter("created_thehive_cases")
    metrics.new_counter("successfully_processed_messages")
    metrics.new_counter("enriched_by_hbase_alerts")
    metrics.new_counter("loaded_hbase_normalized_events")
    metrics.new_counter("loaded_hbase_raw_events")
    metrics.new_counter("thehive_api_errors")
    metrics.new_counter("hbase_errors")

    if not metrics.REGISTRY.get("full_processing_time"):
        metrics.new_histogram("full_processing_time", SlidingTimeWindowReservoir())
    if not metrics.REGISTRY.get("hbase_loading_time"):
        metrics.new_histogram("hbase_loading_time", SlidingTimeWindowReservoir())
    if not metrics.REGISTRY.get("send_alert"):
        metrics.new_histogram("send_alert", SlidingTimeWindowReservoir())
    if not metrics.REGISTRY.get("create_case"):
        metrics.new_histogram("create_case", SlidingTimeWindowReservoir())
    if not metrics.REGISTRY.get("merge_alerts_in_case"):
        metrics.new_histogram("merge_alerts_in_case", SlidingTimeWindowReservoir())
    if not metrics.REGISTRY.get("set_final_tag"):
        metrics.new_histogram("set_final_tag", SlidingTimeWindowReservoir())
    if not metrics.REGISTRY.get("thehive_alert_preparing"):
        metrics.new_histogram("thehive_alert_preparing", SlidingTimeWindowReservoir())
    if not metrics.REGISTRY.get("thehive_case_preparing"):
        metrics.new_histogram("thehive_case_preparing", SlidingTimeWindowReservoir())

    metrics.tag("received_kafka_messages", "default")
    metrics.tag("created_thehive_alerts", "default")
    metrics.tag("created_thehive_cases", "default")
    metrics.tag("successfully_processed_messages", "default")
    metrics.tag("enriched_by_hbase_alerts", "default")
    metrics.tag("loaded_hbase_normalized_events", "default")
    metrics.tag("loaded_hbase_raw_events", "default")
    metrics.tag("thehive_api_errors", "default")
    metrics.tag("hbase_errors", "default")
    metrics.tag("full_processing_time", "default")
    metrics.tag("hbase_loading_time", "default")
    metrics.tag("full_processing_time", "profiling")
    metrics.tag("hbase_loading_time", "profiling")
    metrics.tag("send_alert", "profiling")
    metrics.tag("create_case", "profiling")
    metrics.tag("merge_alerts_in_case", "profiling")
    metrics.tag("set_final_tag", "profiling")
    metrics.tag("thehive_alert_preparing", "profiling")
    metrics.tag("thehive_case_preparing", "profiling")
    logger.info("Registered metrics for app: %s", str(metrics.REGISTRY))
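Tagging groups metrics for bulk readout; assuming appmetrics' tag API, the whole "profiling" set registered above can be fetched in one call:

for name, value in metrics.metrics_by_tag("profiling").items():
    logger.debug("profiling metric %s: %s", name, value)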
Example #15
def test_histogram_zero_samples(self, mock_librato, librato_reporter,
                                metric_name):
    _metrics.new_histogram(metric_name)
    self.metric_submission_test(librato_reporter, [])