Example #1
def cfs_rules():
    """ rules to capture from ColumnFamilyStore """
    return (
        case("ColumnFamilyStore"),
        rule(
            capture(
                r"Enqueuing flush of Memtable-(?P<table>[^@]*)@(?P<hash_code>[0-9]*)\((?P<serialized_bytes>[0-9]*)/(?P<live_bytes>[0-9]*) serialized/live bytes, (?P<ops>[0-9]*) ops\)",
                r"Enqueuing flush of (?P<table>[^:]*): (?P<on_heap_bytes>[0-9]*) \((?P<on_heap_limit>[0-9]*)%\) on-heap, (?P<off_heap_bytes>[0-9]*) \((?P<off_heap_limit>[0-9]*)%\) off-heap",
            ),
            convert(
                int,
                "hash_code",
                "serialized_bytes",
                "live_bytes",
                "ops",
                "on_heap_bytes",
                "off_heap_bytes",
                "on_heap_limit",
                "off_heap_limit",
            ),
            update(
                event_product="cassandra",
                event_category="memtable",
                event_type="enqueue_flush",
            ),
        ),
        rule(
            capture(
                r"Flushing largest CFS\(Keyspace='(?P<keyspace>[^']*)', ColumnFamily='(?P<table>[^']*)'\) to free up room. Used total: (?P<used_on_heap>\d+\.\d+)/(?P<used_off_heap>\d+\.\d+), live: (?P<live_on_heap>\d+\.\d+)/(?P<live_off_heap>\d+\.\d+), flushing: (?P<flushing_on_heap>\d+\.\d+)/(?P<flushing_off_heap>\d+\.\d+), this: (?P<this_on_heap>\d+\.\d+)/(?P<this_off_heap>\d+\.\d+)"
            ),
            convert(
                float,
                "used_on_heap",
                "used_off_heap",
                "live_on_heap",
                "live_off_heap",
                "flushing_on_heap",
                "flushing_off_heap",
                "this_on_heap",
                "this_off_heap",
            ),
            update(
                event_product="cassandra",
                event_category="memtable",
                event_type="flush_largest",
            ),
        ),
        rule(
            capture(
                r"Flushing SecondaryIndex (?P<index_type>[^{]*)\{(?P<index_metadata>[^}]*)\}",
                r"Flushing SecondaryIndex (?P<index_class>[^@]*)@(?P<index_hash>.*)",
            ),
            update(
                event_product="cassandra",
                event_category="secondary_index",
                event_type="flush",
            ),
        ),
    )
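
These capture() patterns are ordinary named-group regular expressions, so each one can be exercised on its own with the standard re module. A minimal sketch against a fabricated "Enqueuing flush" line (the table name and numbers are made up, shaped to fit the first pattern); convert(int, ...) then casts the matched strings and update() stamps the event metadata:

import re

line = "Enqueuing flush of Memtable-users@1234567890(8411/23120 serialized/live bytes, 142 ops)"
pattern = re.compile(
    r"Enqueuing flush of Memtable-(?P<table>[^@]*)@(?P<hash_code>[0-9]*)"
    r"\((?P<serialized_bytes>[0-9]*)/(?P<live_bytes>[0-9]*) serialized/live bytes, (?P<ops>[0-9]*) ops\)"
)
print(pattern.search(line).groupdict())
# {'table': 'users', 'hash_code': '1234567890', 'serialized_bytes': '8411',
#  'live_bytes': '23120', 'ops': '142'}
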
Example #2
def solr_rules():
    """ rules to capture solr """
    return (
        case("SolrFilterCache"),
        rule(
            capture(
                r"Filter cache org.apache.solr.search.SolrFilterCache\$(?P<id>\S+) has reached (?P<entries>[0-9]+) entries of a maximum of (?P<maximum>[0-9]+). Evicting oldest entries..."
            ),
            convert(int, "entries", "maximum"),
            update(
                event_product="solr",
                event_category="filter_cache",
                event_type="eviction_items",
            ),
        ),
        rule(
            capture(
                r"...eviction completed in (?P<duration>[0-9]+) milliseconds. Filter cache org.apache.solr.search.SolrFilterCache\$(?P<id>\S+) usage is now (?P<usage>[0-9]+) (?P<usage_unit>\w+) across (?P<entries>[0-9]+) entries."
            ),
            convert(int, "duration", "entries", "usage"),
            update(
                event_product="solr",
                event_category="filter_cache",
                event_type="eviction_items_duration",
            ),
        ),
        rule(
            capture(
                r"Filter cache org.apache.solr.search.SolrFilterCache\$(?P<id>\S+) has reached (?P<usage>[0-9]+) (?P<usage_unit>\w+) bytes of off-heap memory usage, the maximum is (?P<maximum>[0-9]+) (?P<maximum_unit>\w+). Evicting oldest entries..."
            ),
            convert(int, "maximum", "usage"),
            update(
                event_product="solr",
                event_category="filter_cache",
                event_type="eviction_bytes",
            ),
        ),
        rule(
            capture(
                r"...eviction completed in (?P<duration>[0-9]+) milliseconds. Filter cache org.apache.solr.search.SolrFilterCache\$(?P<id>\S+) usage is now (?P<usage>[0-9]+) across (?P<entries>[0-9]+) entries."
            ),
            convert(int, "duration", "entries", "usage"),
            update(
                event_product="solr",
                event_category="filter_cache",
                event_type="eviction_bytes_duration",
            ),
        ),
        case("QueryComponent"),
        rule(
            capture(r"process: (?P<query>.*$)"),
            update(
                event_product="solr",
                event_category="query_component",
                event_type="query_logs",
            ),
        ),
    )
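
The leading "..." and the unescaped dots in these patterns are regex wildcards, which happen to match the literal dots in the Solr messages as well. A quick check of the first eviction-duration pattern with the stdlib re module (the line below is fabricated to fit the pattern):

import re

line = ("...eviction completed in 45 milliseconds. Filter cache "
        "org.apache.solr.search.SolrFilterCache$1a2b3c usage is now 512 MB across 1024 entries.")
pattern = re.compile(
    r"...eviction completed in (?P<duration>[0-9]+) milliseconds. Filter cache "
    r"org.apache.solr.search.SolrFilterCache\$(?P<id>\S+) usage is now "
    r"(?P<usage>[0-9]+) (?P<usage_unit>\w+) across (?P<entries>[0-9]+) entries."
)
print(pattern.search(line).groupdict())
# {'duration': '45', 'id': '1a2b3c', 'usage': '512', 'usage_unit': 'MB', 'entries': '1024'}
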
Example #3
def tombstone_rules():
    """catch tombstone problems"""
    return (
        case("ReadCommand"),
        rule(
            capture(
                r"Read (?P<live_rows>[0-9]*) live rows and (?P<tombstones>[0-9]*) tombstone cells for query (?P<query>.*) \(see tombstone_warn_threshold\)"
            ),
            convert(int, "tombstones"),
            update(
                event_product="tombstone",
                event_category="reading",
                event_type="tpc_scan_warn",
            ),
        ),
        case("MessageDeliveryTask"),
        rule(
            capture(
                r"Scanned over (?P<tombstones>[0-9]*) tombstones during query '(?P<query>.*)' \(last scanned row partion key was \((?P<pk>.*)\)\); query aborted"
            ),
            convert(int, "tombstones"),
            update(
                event_product="tombstone",
                event_category="reading",
                event_type="scan_error",
            ),
        ),
        case("NoSpamLogger"),
        rule(
            capture(
                r"Scanned over (?P<tombstones>[0-9]*) tombstone rows for query (?P<query>.*) - more than the warning threshold [\d+]+"
            ),
            convert(int, "tombstones"),
            update(
                event_product="tombstone",
                event_category="reading",
                event_type="tpc_scan_warn",
            ),
        ),
        case("MessageDeliveryTask"),
        rule(
            capture(
                r"Read (?P<live>[0-9]*) live rows and (?P<tombstones>[0-9]*) tombstone cells for query (?P<query>.*) \(see tombstone_warn_threshold\)"
            ),
            convert(int, "tombstones"),
            update(
                event_product="tombstone",
                event_category="reading",
                event_type="seda_scan_warn",
            ),
        ),
    )
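
Only the tombstone count is converted to an int here; the query text stays a string. A short sketch of the first warning pattern against a made-up line (the "partion" spelling in the MessageDeliveryTask pattern above presumably mirrors a typo in the log message itself, so it should be left alone):

import re

line = ("Read 100 live rows and 5000 tombstone cells for query "
        "SELECT * FROM ks.tbl LIMIT 100 (see tombstone_warn_threshold)")
m = re.search(
    r"Read (?P<live_rows>[0-9]*) live rows and (?P<tombstones>[0-9]*) tombstone cells "
    r"for query (?P<query>.*) \(see tombstone_warn_threshold\)",
    line,
)
fields = m.groupdict()
fields["tombstones"] = int(fields["tombstones"])  # mirrors convert(int, "tombstones")
print(fields["tombstones"], fields["query"])
# 5000 SELECT * FROM ks.tbl LIMIT 100
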
Example #4
def dd_rules():
    """ rules to capture from database descriptor """
    return (
        case("DatabaseDescriptor"),
        # 6.x disk access mode
        rule(
            # auto mode
            # DiskAccessMode is standard, indexAccessMode is standard, commitlogAccessMode is standard
            capture(
                r"DiskAccessMode is (?P<logged_disk_access_mode>[A-Za-z]*), indexAccessMode is (?P<logged_index_access_mode>[A-Za-z]*), commitlogAccessMode is (?P<logged_commit_log_access_mode>[A-Za-z]*)"
            ),
            update(
                event_product="cassandra",
                event_category="node_config",
                event_type="disk_access",
            ),
        ),
        # 4.8 to 5.1 disk access mode
        rule(
            # auto mode
            # DiskAccessMode 'auto' determined to be mmap, indexAccessMode is mmap
            capture(
                r"DiskAccessMode 'auto' determined to be (?P<logged_disk_access_mode>[A-Za-z]*), indexAccessMode is (?P<logged_index_access_mode>[A-Za-z]*)"
            ),
            update(
                event_product="cassandra",
                event_category="node_config",
                event_type="disk_access",
            ),
        ),
        # non-auto mode
        rule(
            # DatabaseDescriptor.java:326 - DiskAccessMode is standard, indexAccessMode is mmap
            capture(
                r"DiskAccessMode is (?P<logged_disk_access_mode>[A-Za-z]*), indexAccessMode is (?P<logged_index_access_mode>[A-Za-z]*)"
            ),
            update(
                event_product="cassandra",
                event_category="node_config",
                event_type="disk_access",
            ),
        ),
    )
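
Note that the final, generic pattern is a prefix of the 6.x message, so it matches the 6.x line too. Assuming rules are tried in order with the first match winning (which this ordering suggests), the more specific rule has to come first or the commitlog mode would never be captured. Checking with the sample line from the comment above:

import re

line = "DiskAccessMode is standard, indexAccessMode is standard, commitlogAccessMode is standard"
six_x = (r"DiskAccessMode is (?P<logged_disk_access_mode>[A-Za-z]*), "
         r"indexAccessMode is (?P<logged_index_access_mode>[A-Za-z]*), "
         r"commitlogAccessMode is (?P<logged_commit_log_access_mode>[A-Za-z]*)")
generic = (r"DiskAccessMode is (?P<logged_disk_access_mode>[A-Za-z]*), "
           r"indexAccessMode is (?P<logged_index_access_mode>[A-Za-z]*)")
print(re.search(six_x, line) is not None)    # True
print(re.search(generic, line) is not None)  # True: the generic rule matches too
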
Example #5
def config_rules():
    """ rules to capture configs """
    return (
        case("Config"),
        # "Node configuration:[aggregated_request_timeout_in_ms=120000; allocate_tokens_for_keyspace=null; allocate_tokens_for_local_replication_factor=3; write_request_timeout_in_ms=2000]
        rule(
            capture(r"Node configuration:\[(?P<node_configuration>.*)\]"),
            convert(nodeconfig, "node_configuration"),
            update(
                event_product="cassandra",
                event_category="node_config",
                event_type="node_configuration",
            ),
        ),
        case("DseConfig"),
        rule(
            # This machine appears to have 1 thread per CPU core.
            capture(
                r"This machine appears to have (?P<threads_per_core>[0-9.]*) thread\w? per CPU core."
            ),
            convert(int, "threads_per_core"),
            update(
                event_product="cassandra",
                event_category="node_config",
                event_type="cores",
            ),
        ),
        rule(
            # This instance appears to have 2 threads per CPU core and 16 total CPU threads.
            # This instance appears to have 1 thread per CPU core and 8 total CPU threads.
            capture(
                r"This instance appears to have (?P<threads_per_core>[0-9.]*) thread\w? per CPU core and (?P<cpu_cores>[0-9.]*) total CPU threads."
            ),
            convert(int, "threads_per_core", "cpu_cores"),
            update(
                event_product="cassandra",
                event_category="node_config",
                event_type="cores",
            ),
        ),
    )
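
The thread\w? fragment accepts both "thread" and "threads". Running the second pattern over the two sample lines taken from the comments above:

import re

pattern = re.compile(
    r"This instance appears to have (?P<threads_per_core>[0-9.]*) thread\w? per CPU core "
    r"and (?P<cpu_cores>[0-9.]*) total CPU threads."
)
for line in (
    "This instance appears to have 2 threads per CPU core and 16 total CPU threads.",
    "This instance appears to have 1 thread per CPU core and 8 total CPU threads.",
):
    m = pattern.search(line)
    print(int(m.group("threads_per_core")), int(m.group("cpu_cores")))
# 2 16
# 1 8
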
Example #6
def drop_rules():
    """drop rules"""
    return (
        # tpc era
        case("DroppedMessages"),
        rule(
            capture(
                r"(?P<messageType>\S*) messages were dropped in the last 5 s: (?P<localCount>\d*) internal and (?P<remoteCount>\d*) cross node\. Mean internal dropped latency: (?P<localLatency>\d*) ms and Mean cross-node dropped latency: (?P<remoteLatency>\d*) ms"
            ),
            convert(int, "localCount", "remoteCount"),
            convert(
                float,
                "localLatency",
                "remoteLatency",
            ),
            update(
                event_product="cassandra",
                event_category="pools",
                event_type="drops",
            ),
        ),
        # seda era
        case("MessagingService"),
        rule(
            capture(
                r"(?P<messageType>\S*) messages were dropped in last 5000 ms: (?P<localCount>\d*) internal and (?P<remoteCount>\d*) cross node\. Mean internal dropped latency: (?P<localLatency>\d*) ms and Mean cross-node dropped latency: (?P<remoteLatency>\d*) ms"
            ),
            convert(int, "localCount", "remoteCount"),
            convert(
                float,
                "localLatency",
                "remoteLatency",
            ),
            update(
                event_product="cassandra",
                event_category="pools",
                event_type="drops",
            ),
        ),
    )
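
Both eras emit the same field names, so downstream consumers see one schema; only the matched text differs. A sketch of the SEDA-era pattern against a fabricated line, applying the same int/float split as the convert() calls:

import re

line = ("READ messages were dropped in last 5000 ms: 12 internal and 4 cross node. "
        "Mean internal dropped latency: 3045 ms and Mean cross-node dropped latency: 2780 ms")
m = re.search(
    r"(?P<messageType>\S*) messages were dropped in last 5000 ms: (?P<localCount>\d*) internal "
    r"and (?P<remoteCount>\d*) cross node\. Mean internal dropped latency: (?P<localLatency>\d*) ms "
    r"and Mean cross-node dropped latency: (?P<remoteLatency>\d*) ms",
    line,
)
print(int(m["localCount"]), int(m["remoteCount"]))          # 12 4
print(float(m["localLatency"]), float(m["remoteLatency"]))  # 3045.0 2780.0
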
Example #7
def tpc_rules():
    """rules to capture backpressure and core balance problems"""
    return (
        case("NoSpamLogger"),
        rule(
            capture(
                r"TPC backpressure is active on core (?P<core_num>\d+) with global local/remote pending tasks at (?P<global_pending>\d+)/(?P<remote_pending>\d+)"
            ),
            convert(int, "core_num", "global_pending", "remote_pending"),
            update(
                event_product="tpc",
                event_category="backpressure",
                event_type="core_backpressure",
            ),
        ),
        rule(
            capture(
                r"Local TPC backpressure is active with count (?P<local_count>\d+)"
            ),
            convert(int, "local_count"),
            update(
                event_product="tpc",
                event_category="backpressure",
                event_type="core_backpressure_local",
            ),
        ),
        rule(
            capture(
                r"Rejecting droppable message on connection (?P<message_type>.+) with id (?P<id>\d+) from \/(?P<source_ip>.+) to \/(?P<dest_ip>.+) via \((?P<via_ips>.+)\), total dropped: (?P<total_dropped>.\d+), total pending: (?P<total_pending>.\d+), total completed: (?P<total_completed>.\d+)\."
            ),
            convert(int, "total_dropped"),
            update(
                event_product="tpc",
                event_category="backpressure",
                event_type="network_backpressure",
            ),
        ),
    )
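
A fabricated instance of the per-core backpressure message, with all three fields cast to int as the convert() call does:

import re

line = "TPC backpressure is active on core 3 with global local/remote pending tasks at 1024/512"
m = re.search(
    r"TPC backpressure is active on core (?P<core_num>\d+) with global local/remote "
    r"pending tasks at (?P<global_pending>\d+)/(?P<remote_pending>\d+)",
    line,
)
print({k: int(v) for k, v in m.groupdict().items()})
# {'core_num': 3, 'global_pending': 1024, 'remote_pending': 512}
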
Example #8
def zc_rules():
    """catch issues with zero copy streaming"""
    return (
        case("SSTableReader"),
        rule(
            capture(
                r"Could not recreate or deserialize existing bloom filter, continuing with a pass-through bloom filter but this will significantly impact reads performance"
            ),
            update(
                event_product="zcs",
                event_category="streaming",
                event_type="bloom_filter",
            ),
        ),
    )
Example #9
def gc_rules():
    """ rules to capture gc """
    return (
        case("GCInspector"),
        rule(
            capture(r"Heap is (?P<percent_full>[0-9.]*) full.*"),
            convert(percent, "percent_full"),
            update(
                event_product="cassandra",
                event_category="garbage_collection",
                event_type="heap_full",
            ),
        ),
        rule(
            capture(
                r"GC for (?P<gc_type>[A-Za-z]*): (?P<duration>[0-9]*) ms for (?P<collections>[0-9]*) collections, (?P<used>[0-9]*) used; max is (?P<max>[0-9]*)"
            ),
            convert(int, "duration", "collections", "used", "max"),
            update(
                event_product="cassandra",
                event_category="garbage_collection",
                event_type="pause",
            ),
        ),
        rule(
            capture(
                r"(?P<gc_type>[A-Za-z]*) GC in (?P<duration>[0-9]*)ms. (( CMS)? Old Gen: (?P<oldgen_before>[0-9]*) -> (?P<oldgen_after>[0-9]*);)?( Code Cache: (?P<codecache_before>[0-9]*) -> (?P<codecache_after>[0-9]*);)?( Compressed Class Space: (?P<compressed_class_before>[0-9]*) -> (?P<compressed_class_after>[0-9]*);)?( CMS Perm Gen: (?P<permgen_before>[0-9]*) -> (?P<permgen_after>[0-9]*);)?( Metaspace: (?P<metaspace_before>[0-9]*) -> (?P<metaspace_after>[0-9]*);)?( Par Eden Space: (?P<eden_before>[0-9]*) -> (?P<eden_after>[0-9]*);)?( Par Survivor Space: (?P<survivor_before>[0-9]*) -> (?P<survivor_after>[0-9]*))?"
            ),
            convert(
                int,
                "duration",
                "oldgen_before",
                "oldgen_after",
                "permgen_before",
                "permgen_after",
                "codecache_before",
                "codecache_after",
                "compressed_class_before",
                "compressed_class_after",
                "metaspace_before",
                "metaspace_after",
                "eden_before",
                "eden_after",
                "survivor_before",
                "survivor_after",
            ),
            update(
                event_product="cassandra",
                event_category="garbage_collection",
                event_type="pause",
            ),
        ),
        rule(
            capture(
                r"(?P<gc_type>.+) Generation GC in (?P<duration>[0-9]+)ms.  (Compressed Class Space: (?P<compressed_class_before>[0-9]+) -> (?P<compressed_class_after>[0-9]+);)?.((.+) Eden Space: (?P<eden_before>[0-9]+) -> (?P<eden_after>[0-9]+);)?.((.+) Old Gen: (?P<oldgen_before>[0-9]+) -> (?P<oldgen_after>[0-9]+);)?.((.+) Survivor Space: (?P<survivor_before>[0-9]+) -> (?P<survivor_after>[0-9]+);)?.(Metaspace: (?P<metaspace_before>[0-9]+) -> (?P<metaspace_after>[0-9]+))?"
            ),
            convert(
                int,
                "duration",
                "oldgen_before",
                "oldgen_after",
                "permgen_before",
                "permgen_after",
                "compressed_class_before",
                "compressed_class_after",
                "metaspace_before",
                "metaspace_after",
                "eden_before",
                "eden_after",
                "survivor_before",
                "survivor_after",
            ),
            update(
                event_product="cassandra",
                event_category="garbage_collection",
                event_type="pause",
            ),
        ),
    )
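
The long per-generation patterns mark most sections optional, so groups absent from a given line come back as None; presumably convert() skips those rather than failing. A small check of the simpler pause pattern plus the optional-group behavior (lines fabricated):

import re

line = "GC for ParNew: 166 ms for 10 collections, 2413568 used; max is 8506048512"
m = re.search(
    r"GC for (?P<gc_type>[A-Za-z]*): (?P<duration>[0-9]*) ms for (?P<collections>[0-9]*) collections, "
    r"(?P<used>[0-9]*) used; max is (?P<max>[0-9]*)",
    line,
)
print(m.groupdict())
# {'gc_type': 'ParNew', 'duration': '166', 'collections': '10',
#  'used': '2413568', 'max': '8506048512'}

# Optional sections that never participate report None:
m2 = re.search(r"(?P<before>\d+)(?: -> (?P<after>\d+))?", "123")
print(m2.groupdict())  # {'before': '123', 'after': None}
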
Example #10
def daemon_rules():
    """ rules to capture from daemon """
    return (
        case("StorageService"),
        rule(
            # StorageService.java:607 - Cassandra version: 3.0.12.1656
            capture(r"Cassandra version: (?P<cassandra_version>.*)$"),
            update(
                event_product="cassandra",
                event_category="startup",
                event_type="server_version",
            ),
        ),
        case("DseDaemon"),
        rule(
            capture(r"DSE version: (?P<version>.*)$"),
            update(
                event_product="dse",
                event_category="startup",
                event_type="server_version",
            ),
        ),
        rule(
            # DseDaemon.java:463 - Solr version: 6.0.1.0.2224
            capture(r"Solr version: (?P<solr_version>.*)$"),
            update(
                event_product="dse",
                event_category="startup",
                event_type="server_version",
            ),
        ),
        rule(
            # Spark version: 2.0.2.17
            capture(r"Spark version: (?P<spark_version>.*)$"),
            update(
                event_product="dse",
                event_category="startup",
                event_type="server_version",
            ),
        ),
        rule(
            # Spark Cassandra Connector version: 2.0.7
            capture(
                r"Spark Cassandra Connector version: (?P<spark_connector_version>.*)$"
            ),
            update(
                event_product="dse",
                event_category="startup",
                event_type="server_version",
            ),
        ),
        rule(
            # DSE Spark Connector version: 6.0.6
            capture(
                r"DSE Spark Connector version: (?P<dse_spark_connector_version>.*)$"
            ),
            update(
                event_product="dse",
                event_category="startup",
                event_type="server_version",
            ),
        ),
        case("CassandraDaemon"),
        rule(
            capture(r"JVM Arguments: \[(?P<jvm_args>.*)\]"),
            convert(jvm_args, "jvm_args"),
            update(
                event_product="cassandra",
                event_category="node_config",
                event_type="jvm_args",
            ),
        ),
        rule(
            capture(r"Classpath: (?P<classpath>.*)"),
            update(
                event_product="cassandra",
                event_category="startup",
                event_type="classpath",
            ),
        ),
    )
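
Each version rule simply grabs everything after a fixed prefix up to the end of the line, as in the comment's example:

import re

line = "Cassandra version: 3.0.12.1656"
m = re.search(r"Cassandra version: (?P<cassandra_version>.*)$", line)
print(m.group("cassandra_version"))  # 3.0.12.1656
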
Example #11
    *dd_rules(),
))


def update_message(fields):
    """ updates message fields """
    subfields = None
    if 'source_file' in fields:
        subfields = capture_message(fields['source_file'][:-5],
                                    fields['message'])
    else:
        # need to handle the no source file output.log format
        if fields['message']:
            # dirty hack where the message structure tells us what we want
            tokens = fields['message'].split()
            if tokens[0] == 'DiskAccessMode':
                subfields = capture_message('DatabaseDescriptor',
                                            fields['message'])
            elif len(tokens) > 1 and tokens[0] == 'This' and (
                    tokens[1] == "instance" or tokens[1] == "machine"):
                subfields = capture_message('DseConfig', fields['message'])
    if subfields is not None:
        fields.update(subfields)


capture_line = rule(
    output_capture_rule, convert(int, 'source_line'), update_message,
    default(event_product='unknown',
            event_category='unknown',
            event_type='unknown'))
Example #12
# Copyright 2020 DataStax, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# pylint: disable=line-too-long
"""parses block dev reports"""
from pysper.parser.rules import capture, convert, default, rule

capture_line = rule(
    capture(
        #rw     8   512  4096          0    800166076416   /dev/sdb
        r'(?P<ro>[a-z]*) +(?P<ra>[0-9]*) +(?P<ssz>[0-9]*) +(?P<blk_sz>[0-9]*) *\s(?P<start_sec>[0-9]*) +(?P<size>[0-9]*) +(?P<device>.*)'
    ),
    convert(int, 'ra', 'ssz', 'blk_sz', 'start_sec', 'size'),
    default(event_product='unknown', event_category='unknown', event_type='unknown'))
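
The commented sample line can be fed straight back through the pattern:

import re

line = "rw     8   512  4096          0    800166076416   /dev/sdb"
m = re.search(
    r"(?P<ro>[a-z]*) +(?P<ra>[0-9]*) +(?P<ssz>[0-9]*) +(?P<blk_sz>[0-9]*) *\s"
    r"(?P<start_sec>[0-9]*) +(?P<size>[0-9]*) +(?P<device>.*)",
    line,
)
print(m.groupdict())
# {'ro': 'rw', 'ra': '8', 'ssz': '512', 'blk_sz': '4096',
#  'start_sec': '0', 'size': '800166076416', 'device': '/dev/sdb'}
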
Example #13
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""validates the basic logline function works correctly"""
import unittest
import os
from pysper.parser.rules import capture, default, rule
from pysper import parser
from tests import current_dir

capture_line = rule(
    capture(r"(?P<level>[A-Z]+)"),
    default(event_product="unknown", event_category="unknown", event_type="unknown"),
)


class TestParser(unittest.TestCase):
    """test the parser"""

    def test_parses_all_matches(self):
        """validates the parser returns every line"""
        rows = []
        with open(
            os.path.join(current_dir(__file__), "testdata", "simple.log")
        ) as test_file:
            events = parser.read_log(test_file, capture_line)
            rows = list(events)
        self.assertEqual(len(rows), 2)
Example #14
def update_message(fields):
    """updates message fields"""
    subfields = None
    if "source_file" in fields:
        subfields = capture_message(fields["source_file"][:-5],
                                    fields["message"])
    else:
        # need to handle the no source file output.log format
        if fields["message"]:
            # dirty hack where the message structure tells us what we want
            tokens = fields["message"].split()
            if tokens[0] == "DiskAccessMode":
                subfields = capture_message("DatabaseDescriptor",
                                            fields["message"])
            elif (len(tokens) > 1 and tokens[0] == "This"
                  and (tokens[1] == "instance" or tokens[1] == "machine")):
                subfields = capture_message("DseConfig", fields["message"])
    if subfields is not None:
        fields.update(subfields)


capture_line = rule(
    output_capture_rule,
    convert(int, "source_line"),
    update_message,
    default(event_product="unknown",
            event_category="unknown",
            event_type="unknown"),
)
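
The token sniffing above routes bare output.log lines (which carry no source_file field) to the right ruleset. A minimal stand-alone sketch of that dispatch, with a stubbed capture_message since the real pysper helper is not shown in this snippet:

def capture_message(source, message):
    # stub: the real helper runs the rules registered for the given source
    return {"routed_to": source}

fields = {"message": "DiskAccessMode is standard, indexAccessMode is mmap"}
tokens = fields["message"].split()
if tokens[0] == "DiskAccessMode":
    fields.update(capture_message("DatabaseDescriptor", fields["message"]))
print(fields["routed_to"])  # DatabaseDescriptor
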
Example #15
def memtable_rules():
    """ rules to capture from memtable/cfs """
    return (
        case("Memtable", "ColumnFamilyStore"),
        rule(
            capture(
                r"Writing Memtable-(?P<table>[^@]*)@(?P<hash_code>[0-9]*)\(((?P<serialized_bytes>[0-9]*)|(?P<serialized_kb>[0-9.]*)KiB|(?P<serialized_mb>[0-9.]*)MiB) serialized bytes, (?P<ops>[0-9]*) ops, (?P<on_heap_limit>[0-9]*)%/(?P<off_heap_limit>[0-9]*)% of on/off-heap limit\)",
                r"Writing Memtable-(?P<table>[^@]*)@(?P<hash_code>[0-9]*)\((?P<serialized_bytes>[0-9]*)/(?P<live_bytes>[0-9]*) serialized/live bytes, (?P<ops>[0-9]*) ops\)",
            ),
            convert(
                int,
                "hash_code",
                "serialized_bytes",
                "live_bytes",
                "ops",
                "on_heap_limit",
                "off_heap_limit",
            ),
            convert(float, "serialized_kb"),
            update(
                event_product="cassandra",
                event_category="memtable",
                event_type="begin_flush",
            ),
        ),
        rule(
            capture(
                r"Completed flushing (?P<filename>[^ ]*) \(((?P<file_size_mb>[0-9.]*)MiB|(?P<file_size_kb>[0-9.]*)KiB|(?P<file_size_bytes>[0-9]*) bytes)\) for commitlog position ReplayPosition\(segmentId=(?P<segment_id>[0-9]*), position=(?P<position>[0-9]*)\)",
                r"Completed flushing; nothing needed to be retained.  Commitlog position was ReplayPosition\(segmentId=(?P<segment_id>[0-9]*), position=(?P<position>[0-9]*)\)",
            ),
            convert(int, "file_size_bytes", "segment_id", "position"),
            convert(float, "file_size_kb"),
            update(
                event_product="cassandra",
                event_category="memtable",
                event_type="end_flush",
            ),
        ),
        rule(
            capture(
                r"CFS\(Keyspace='(?P<keyspace>[^']*)', ColumnFamily='(?P<table>[^']*)'\) liveRatio is (?P<live_ratio>[0-9.]*) \(just-counted was (?P<just_counted>[0-9.]*)\).  calculation took (?P<duration>[0-9]*)ms for (?P<cells>[0-9]*) (columns|cells)"
            ),
            convert(float, "live_ratio", "just_counted"),
            convert(int, "duration", "cells"),
            update(
                event_product="cassandra",
                event_category="memtable",
                event_type="live_ratio_estimate",
            ),
        ),
        rule(
            capture(
                r"setting live ratio to maximum of (?P<max_sane_ratio>[0-9.]*) instead of (?P<live_ratio_estimate>[0-9.]*)"
            ),
            convert(float, "max_sane_ratio", "live_ratio_estimate"),
            update(
                event_product="cassandra",
                event_category="memtable",
                event_type="live_ratio_max",
            ),
        ),
    )
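
The alternation in the "Writing Memtable" pattern means exactly one of serialized_bytes / serialized_kb / serialized_mb participates per line while the others come back None, which is presumably why the separate int and float convert() calls are safe. With a fabricated KiB-form line:

import re

pattern = re.compile(
    r"Writing Memtable-(?P<table>[^@]*)@(?P<hash_code>[0-9]*)"
    r"\(((?P<serialized_bytes>[0-9]*)|(?P<serialized_kb>[0-9.]*)KiB|(?P<serialized_mb>[0-9.]*)MiB)"
    r" serialized bytes, (?P<ops>[0-9]*) ops,"
    r" (?P<on_heap_limit>[0-9]*)%/(?P<off_heap_limit>[0-9]*)% of on/off-heap limit\)"
)
m = pattern.search(
    "Writing Memtable-users@735935561(1.234KiB serialized bytes, 50 ops, 10%/0% of on/off-heap limit)"
)
print(m.group("serialized_kb"))     # 1.234
print(m.group("serialized_bytes"))  # None
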
Example #16
def status_rules():
    """ rules to capture from statuslogger """
    return (
        case("StatusLogger"),
        rule(
            capture(
                r"^Pool Name +Active +Pending +Backpressure +Delayed +Shared +Stolen +Completed +Blocked +All Time Blocked$"
            ),
            update(
                event_product="cassandra",
                event_category="status",
                event_type="threadpool_header",
                rule_type="6.8",
            ),
        ),
        rule(
            capture(
                r"Pool Name +Active +Pending \(w/Backpressure\) +Delayed +Completed +Blocked +All Time Blocked$"
            ),
            update(
                event_product="cassandra",
                event_category="status",
                event_type="threadpool_header",
                rule_type="new",
            ),
        ),
        rule(
            capture(
                r"Pool Name +Active +Pending( +Completed)? +Blocked( +All Time Blocked)?"
            ),
            update(
                event_product="cassandra",
                event_category="status",
                event_type="threadpool_header",
                rule_type="old",
            ),
        ),
        rule(
            capture(
                r"(?P<pool_name>[A-Za-z0-9_/#]+) +((?P<active>[0-9]+)|N/A) +((?P<pending>[0-9]+)|N/A) +((?P<backpressure>[0-9]+)|N/A) +((?P<delayed>[0-9]+)|N/A) +((?P<shared>[0-9]+)|N/A) +((?P<stolen>[0-9]+)|N/A) +((?P<completed>[0-9]+)|N/A) +((?P<blocked>[0-9]+)|N/A) +((?P<all_time_blocked>[0-9]+)|N/A)$"
            ),
            convert(
                int,
                "active",
                "pending",
                "backpressure",
                "delayed",
                "shared",
                "stolen",
                "completed",
                "blocked",
                "all_time_blocked",
            ),
            update(
                event_product="cassandra",
                event_category="status",
                event_type="threadpool_status",
                rule_type="6.8",
            ),
        ),
        rule(
            capture(
                r"(?P<pool_name>[A-Za-z0-9_/#]+) +((?P<active>[0-9]+)|N/A) +(?P<pending>[0-9]+) +\(((?P<backpressure>[0-9]+)|N/A)\) +((?P<delayed>[0-9]+)|N/A) +(?P<completed>[0-9]+) +((?P<blocked>[0-9]+)|N/A) +(?P<all_time_blocked>[0-9]+)$"
            ),
            convert(
                int,
                "active",
                "pending",
                "backpressure",
                "delayed",
                "completed",
                "blocked",
                "all_time_blocked",
            ),
            update(
                event_product="cassandra",
                event_category="status",
                event_type="threadpool_status",
                rule_type="new",
            ),
        ),
        rule(
            capture(
                r"(?P<pool_name>[A-Za-z_-]+) +((?P<active>[0-9]+)|n/a) +(?P<pending>[0-9]+)(/(?P<pending_responses>[0-9]+))?( +(?P<completed>[0-9]+) +(?P<blocked>[0-9]+) +(?P<all_time_blocked>[0-9]+))$"
            ),
            convert(
                int,
                "active",
                "pending",
                "pending_responses",
                "completed",
                "blocked",
                "all_time_blocked",
            ),
            update(
                event_product="cassandra",
                event_category="status",
                event_type="threadpool_status",
                rule_type="old",
            ),
        ),
        rule(
            capture(
                r"(?P<pool_name>[A-Za-z0-9_/#]+) +((?P<active>[0-9]+)|n/a) +(?P<pending>[0-9]+)(/(?P<backpressure>[0-9]+))?$"
            ),
            convert(int, "active", "pending", "backpressure"),
            update(
                event_product="cassandra",
                event_category="status",
                event_type="threadpool_status",
                rule_type="new",
            ),
        ),
        rule(
            capture(r"Cache Type +Size +Capacity +KeysToSave(Provider)?"),
            update(
                event_product="cassandra",
                event_category="status",
                event_type="cache_header",
            ),
        ),
        rule(
            capture(
                r"(?P<cache_type>[A-Za-z]*Cache(?! Type)) *(?P<size>[0-9]*) *(?P<capacity>[0-9]*) *(?P<keys_to_save>[^ ]*) *(?P<provider>[A-Za-z_.$]*)"
            ),
            convert(int, "size", "capacity"),
            update(
                event_product="cassandra",
                event_category="status",
                event_type="cache_status",
            ),
        ),
        rule(
            capture(r"ColumnFamily +Memtable ops,data"),
            update(
                event_product="cassandra",
                event_category="status",
                event_type="memtable_header",
            ),
        ),
        rule(
            capture(
                r"(?P<keyspace>[^.]*)\.(?P<table>[^ ]*) +(?P<ops>[0-9]*),(?P<data>[0-9]*)"
            ),
            convert(int, "ops", "data"),
            update(
                event_product="cassandra",
                event_category="status",
                event_type="memtable_status",
            ),
        ),
    )
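
The three header variants tag each table with a rule_type so the status rows that follow can be interpreted per era. Exercising the "old" threadpool row pattern on a fabricated line:

import re

line = "MutationStage                     0         0       12345         0                 0"
m = re.search(
    r"(?P<pool_name>[A-Za-z_-]+) +((?P<active>[0-9]+)|n/a) +(?P<pending>[0-9]+)"
    r"(/(?P<pending_responses>[0-9]+))?( +(?P<completed>[0-9]+) +(?P<blocked>[0-9]+)"
    r" +(?P<all_time_blocked>[0-9]+))$",
    line,
)
print(m.group("pool_name"), m.group("pending"), m.group("completed"))
# MutationStage 0 12345
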
Example #17
# Copyright 2020 DataStax, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""parses block dev reports"""
from pysper.parser.rules import capture, convert, default, rule

capture_line = rule(
    capture(
        # rw     8   512  4096          0    800166076416   /dev/sdb
        r"(?P<ro>[a-z]*) +(?P<ra>[0-9]*) +(?P<ssz>[0-9]*) +(?P<blk_sz>[0-9]*) *\s(?P<start_sec>[0-9]*) +(?P<size>[0-9]*) +(?P<device>.*)"
    ),
    convert(int, "ra", "ssz", "blk_sz", "start_sec", "size"),
    default(event_product="unknown", event_category="unknown", event_type="unknown"),
)
Example #18
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""validates the basic logline function works correctly"""
import os
from pysper.parser.rules import capture, default, rule
from pysper import parser
from tests import current_dir

capture_line = rule(
    capture(r'(?P<level>[A-Z]+)'),
    default(event_product='unknown',
            event_category='unknown',
            event_type='unknown'))


def test_parses_all_matches():
    """validates the parser returns every line"""
    rows = []
    with open(os.path.join(current_dir(__file__), "testdata",
                           "simple.log")) as test_file:
        events = parser.read_log(test_file, capture_line)
        rows = list(events)
    assert len(rows) == 2
    line1 = "WARN"
    assert rows[0]['level'] == line1
    line2 = "ERROR"
    assert rows[1]['level'] == line2
Example #19
def solr_rules():
    """rules to capture solr"""
    return (
        case("SolrFilterCache"),
        rule(
            capture(
                r"Filter cache org.apache.solr.search.SolrFilterCache\$(?P<id>\S+) has reached (?P<entries>[0-9]+) entries of a maximum of (?P<maximum>[0-9]+). Evicting oldest entries..."
            ),
            convert(int, "entries", "maximum"),
            update(
                event_product="solr",
                event_category="filter_cache",
                event_type="eviction_items",
            ),
        ),
        rule(
            capture(
                r"Filter cache org.apache.solr.search.SolrFilterCache\$(?P<id>\S+) has reached (?P<usage>[0-9]+) (?P<usage_unit>\w+) bytes of off-heap memory usage, the maximum is (?P<maximum>[0-9]+) (?P<maximum_unit>\w+). Evicting oldest entries..."
            ),
            convert(int, "maximum", "usage"),
            update(
                event_product="solr",
                event_category="filter_cache",
                event_type="eviction_bytes",
            ),
        ),
        # eviction duration log pattern for DSE before DSP-18693
        rule(
            capture(
                r"...eviction completed in (?P<duration>[0-9]+) milliseconds. Filter cache org.apache.solr.search.SolrFilterCache\$(?P<id>\S+) usage is now (?P<usage>[0-9]+) across (?P<entries>[0-9]+) entries."
            ),
            convert(int, "duration", "entries", "usage"),
            update(
                event_product="solr",
                event_category="filter_cache",
                event_type="eviction_duration",
            ),
        ),
        # new eviction duration log pattern after DSP-18693
        rule(
            capture(
                r"...eviction completed in (?P<duration>[0-9]+) milliseconds. Filter cache org.apache.solr.search.SolrFilterCache\$(?P<id>\S+) usage is now (?P<usage>[0-9]+) (?P<usage_unit>\w+) across (?P<entries>[0-9]+) entries."
            ),
            convert(int, "duration", "entries", "usage"),
            update(
                event_product="solr",
                event_category="filter_cache",
                event_type="eviction_duration",
            ),
        ),
        case("QueryComponent"),
        rule(
            capture(r"process: (?P<query>.*$)"),
            update(
                event_product="solr",
                event_category="query_component",
                event_type="query_logs",
            ),
        ),
        case("AbstractSolrSecondaryIndex"),
        rule(
            capture(
                r"\[(?P<core_name>.+)\]: Increasing soft commit max time to (?P<commit_time>[0-9]+)"
            ),
            update(
                event_product="solr",
                event_category="indexing",
                event_type="increase_soft_commit",
            ),
        ),
        case("AbstractSolrSecondaryIndex"),
        rule(
            capture(
                r"\[(?P<core_name>.+)\]: Restoring soft commit max time back to (?P<commit_time>[0-9]+)"
            ),
            update(
                event_product="solr",
                event_category="indexing",
                event_type="restore_soft_commit",
            ),
        ),
    )
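
Note that the soft-commit rules capture commit_time but apply no convert(), so the value presumably stays a string for downstream code. A quick sketch with a made-up core name:

import re

line = "[ks.tbl_index]: Increasing soft commit max time to 10000"
m = re.search(
    r"\[(?P<core_name>.+)\]: Increasing soft commit max time to (?P<commit_time>[0-9]+)",
    line,
)
print(m.group("core_name"), m.group("commit_time"))  # ks.tbl_index 10000
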