def test_kv_store_decode_restores_encoded_event():
    """Encoding an event and decoding it back yields an equal event."""
    source = MetricsEvent(gen_next_timestamp(), gen_metrics_name(), 4.2)

    key, value = KvStoreMetricsFormat.encode(source)
    restored = KvStoreMetricsFormat.decode(key, value)

    assert source == restored
def test_kv_store_encode_generate_different_keys_for_different_seq_no():
    """The same event encoded under different sequence numbers must get
    distinct keys but identical serialized values."""
    ev = MetricsEvent(gen_next_timestamp(), gen_metrics_name(), 4.2)

    key_a, val_a = KvStoreMetricsFormat.encode(ev, 1)
    key_b, val_b = KvStoreMetricsFormat.encode(ev, 2)

    assert key_a != key_b
    assert val_a == val_b
# Example 3
def load_metrics_from_kv_store(
    storage: KeyValueStorage,
    min_ts: datetime = None,
    max_ts: datetime = None,
    step: timedelta = timedelta(minutes=1)
) -> MetricsStats:
    """
    Load metrics events from a key-value storage into a MetricsStats accumulator.

    :param storage: key-value storage holding KvStoreMetricsFormat-encoded events
    :param min_ts: if given, start iteration from the key encoding this timestamp
    :param max_ts: if given, stop as soon as an event past this timestamp is seen
                   (relies on keys being ordered by timestamp)
    :param step: aggregation bucket size passed to MetricsStats
    :return: populated MetricsStats
    """
    result = MetricsStats(step)

    start = KvStoreMetricsFormat.encode_key(min_ts, 0) if min_ts else None
    for k, v in storage.iterator(start=start):
        ev = KvStoreMetricsFormat.decode(k, v)
        # decode() can fail on a malformed record; skip it instead of
        # crashing on ev.timestamp below (matches the other variant of
        # this loader in the codebase)
        if ev is None:
            continue
        if max_ts is not None and ev.timestamp > max_ts:
            break
        result.add(ev.timestamp, ev.name, ev.value)

    return result
# Example 4
def load_metrics_from_kv_store(storage: KeyValueStorage,
                               min_ts: datetime = None,
                               max_ts: datetime = None,
                               step: timedelta = timedelta(minutes=1)) -> MetricsStats:
    """Accumulate decoded metrics events from storage into a MetricsStats.

    Iteration optionally starts at the key encoding min_ts and stops once an
    event newer than max_ts is encountered; undecodable records are skipped.
    """
    stats = MetricsStats(step)

    first_key = KvStoreMetricsFormat.encode_key(min_ts, 0) if min_ts else None
    for key, raw in storage.iterator(start=first_key):
        event = KvStoreMetricsFormat.decode(key, raw)
        if event is None:
            # Record could not be decoded — skip it
            continue
        if max_ts is not None and event.timestamp > max_ts:
            break
        stats.add(event.timestamp, event.name, event.value)

    return stats
# Example 5
def check_metrics_data(storage):
    """Sanity-check metrics persisted in the given key-value storage."""
    decoded = [KvStoreMetricsFormat.decode(key, raw)
               for key, raw in storage.iterator()]

    # Metrics must actually have been written
    assert len(decoded) > 0

    # Events must come back ordered by timestamp
    assert decoded == sorted(decoded, key=lambda ev: ev.timestamp)

    # Every known metric type must have occurred during the test
    seen_names = {ev.name for ev in decoded}
    for name in MetricsName:
        assert name in seen_names
def test_kv_store_metrics_collector_stores_properly_encoded_data(storage: KeyValueStorage, value):
    """The collector must persist exactly the key/value pair that
    KvStoreMetricsFormat.encode produces for the same event."""
    ts = MockTimestamp(gen_next_timestamp())
    metrics = KvStoreMetricsCollector(storage, ts)
    # Storage must start empty so the single stored record is ours
    assert len(list(storage.iterator())) == 0

    # Renamed from `id` to avoid shadowing the builtin
    name = gen_metrics_name()
    event = MetricsEvent(ts.value, name, value)
    encoded_key, encoded_value = KvStoreMetricsFormat.encode(event)

    metrics.add_event(name, value)
    stored_events = list(storage.iterator())

    assert len(stored_events) == 1
    assert stored_events[0][0] == encoded_key
    assert stored_events[0][1] == encoded_value
def test_kv_store_metrics_collector_store_all_events_with_same_timestamp(storage: KeyValueStorage):
    """Events added under a single timestamp are all stored, kept in order,
    and retain their original values."""
    ts = MockTimestamp()
    collector = KvStoreMetricsCollector(storage, ts)
    source_values = [10, 2, 54, 2]

    for val in source_values:
        collector.add_event(MetricsName.BACKUP_THREE_PC_BATCH_SIZE, val)
    decoded = [KvStoreMetricsFormat.decode(key, raw)
               for key, raw in storage.iterator()]

    # Every event made it into storage
    assert len(decoded) == len(source_values)
    # Stored in timestamp order
    assert decoded == sorted(decoded, key=lambda ev: ev.timestamp)
    # Each stored value originates from the source list
    for ev in decoded:
        assert ev.value in source_values
def test_kv_store_metrics_collector_store_all_data_in_order(
        storage: KeyValueStorage):
    """Events added at increasing timestamps come back complete and ordered."""
    ts = MockTimestamp()
    collector = KvStoreMetricsCollector(storage, ts)
    source_events = generate_events(10)

    for src in source_events:
        ts.value = src.timestamp
        collector.add_event(src.name, src.value)
    decoded = [KvStoreMetricsFormat.decode(key, raw)
               for key, raw in storage.iterator()]

    # Every event was stored
    assert len(decoded) == len(source_events)
    # Stored in timestamp order
    assert decoded == sorted(decoded, key=lambda ev: ev.timestamp)
    # Nothing was stored that wasn't in the source events
    for ev in decoded:
        assert ev in source_events
# Example 9
def test_kv_store_metrics_config(looper, txnPoolNodeSet, tdir, tconf,
                                 sdk_pool_handle, sdk_wallet_client):
    """Integration test: drive a transaction pool for a while, then verify
    each node's metrics KV storage contains ordered events covering every
    expected metric type.

    Events are generated by sending random requests over several iterations
    spanning more than one PerfCheckFreq window; per-node role (master /
    backup primary, presence of a primary decider) determines which metric
    names are excluded from the coverage check.
    """
    # Run long enough to cover more than one performance-check window
    total_time = 1.5 * tconf.PerfCheckFreq
    total_iters = 5
    iter_time = total_time / total_iters

    for _ in range(total_iters):
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  sdk_wallet_client, 15)
        looper.runFor(iter_time)

    for node in txnPoolNodeSet:
        # Open each node's metrics DB read-only so we don't disturb the node
        storage = initKeyValueStorage(tconf.METRICS_KV_STORAGE,
                                      node.dataLocation,
                                      tconf.METRICS_KV_DB_NAME,
                                      read_only=True)

        events = [
            KvStoreMetricsFormat.decode(k, v) for k, v in storage.iterator()
        ]

        # Check that metrics are actually written
        assert len(events) > 0

        # Check that all events are stored in correct order
        assert sorted(events, key=lambda v: v.timestamp) == events

        # We don't expect some events in this test
        unexpected_events = {
            MetricsName.CATCHUP_TXNS_SENT,
            MetricsName.CATCHUP_TXNS_RECEIVED,
            MetricsName.GC_GEN2_TIME,
            MetricsName.GC_UNCOLLECTABLE_OBJECTS,
            MetricsName.GC_GEN2_COLLECTED_OBJECTS,
            MetricsName.PROCESS_CHECKPOINT_TIME,
            MetricsName.SEND_CHECKPOINT_TIME,
            MetricsName.BACKUP_PROCESS_CHECKPOINT_TIME,
            MetricsName.BACKUP_SEND_CHECKPOINT_TIME,
            MetricsName.PROCESS_CONSISTENCY_PROOF_TIME,
            MetricsName.PROCESS_CATCHUP_REQ_TIME,
            MetricsName.PROCESS_CATCHUP_REP_TIME,
            MetricsName.NODE_CHECK_NODE_REQUEST_SPIKE,
            MetricsName.NODE_SEND_REJECT_TIME,

            # Obsolete metrics
            MetricsName.DESERIALIZE_DURING_UNPACK_TIME,

            # TODO: reduce monitor window so these events are also captured
            MetricsName.MONITOR_AVG_THROUGHPUT,
            MetricsName.BACKUP_MONITOR_AVG_THROUGHPUT,
            MetricsName.MONITOR_AVG_LATENCY,
            MetricsName.BACKUP_MONITOR_AVG_LATENCY,

            # Temporary metrics
            MetricsName.STORAGE_IDR_CACHE_READERS,
            MetricsName.STORAGE_IDR_CACHE_TABLES_NUM,
            MetricsName.STORAGE_IDR_CACHE_TABLES_SIZE,
            MetricsName.STORAGE_ATTRIBUTE_STORE_READERS,
            MetricsName.STORAGE_ATTRIBUTE_STORE_TABLES_NUM,
            MetricsName.STORAGE_ATTRIBUTE_STORE_TABLES_SIZE
        }

        # Don't expect some metrics from master primary
        # (a primary processes incoming 3PC messages differently from replicas)
        if node.master_replica.isPrimary:
            unexpected_events.add(MetricsName.PROCESS_PREPREPARE_TIME)
            unexpected_events.add(MetricsName.SEND_PREPARE_TIME)
        else:
            unexpected_events.add(MetricsName.SEND_PREPREPARE_TIME)
            unexpected_events.add(MetricsName.CREATE_3PC_BATCH_TIME)
            unexpected_events.add(MetricsName.BLS_UPDATE_PREPREPARE_TIME)

        # Don't expect some metrics from backup primary
        # (test assumes exactly one backup replica per node)
        assert node.replicas.num_replicas == 2
        if node.replicas[1].isPrimary:
            unexpected_events.add(MetricsName.BACKUP_PROCESS_PREPREPARE_TIME)
            unexpected_events.add(MetricsName.BACKUP_SEND_PREPARE_TIME)
        else:
            unexpected_events.add(MetricsName.BACKUP_SEND_PREPREPARE_TIME)
            unexpected_events.add(MetricsName.BACKUP_CREATE_3PC_BATCH_TIME)
            unexpected_events.add(MetricsName.BLS_UPDATE_PREPREPARE_TIME)

        # Primary-decider metrics only appear when a decider is configured
        if not node.primaryDecider:
            unexpected_events.add(MetricsName.PRIMARY_DECIDER_ACTION_QUEUE)
            unexpected_events.add(MetricsName.PRIMARY_DECIDER_AQ_STASH)
            unexpected_events.add(
                MetricsName.PRIMARY_DECIDER_REPEATING_ACTIONS)
            unexpected_events.add(MetricsName.PRIMARY_DECIDER_SCHEDULED)
            unexpected_events.add(MetricsName.PRIMARY_DECIDER_INBOX)
            unexpected_events.add(MetricsName.PRIMARY_DECIDER_OUTBOX)

        # Check that all event types happened during test
        # (metrics above TMP_METRIC are excluded from the coverage check)
        metric_names = {ev.name for ev in events}
        for t in MetricsName:
            if t in unexpected_events or t > TMP_METRIC:
                continue
            assert t in metric_names