Example #1
0
 def __init__(self, check, config):
     # type: (MySql, MySQLConfig) -> None
     """Hold per-check state used for MySQL statement-metrics collection."""
     self._check = check
     self._config = config
     # Resolved lazily; None until the DB hostname is first looked up — TODO confirm with caller.
     self._db_hostname = None
     self.log = get_check_logger()
     # Snapshot of the previous run's rows, used to turn cumulative counters into per-run deltas.
     self._state = StatementMetrics()
Example #2
0
 def __init__(self, check, config, connection_args):
     # type: (MySql, MySQLConfig, dict) -> None
     """Set up the MySQL statement-metrics async job.

     The previous type comment omitted ``connection_args`` (presumably the
     pymysql connection keyword arguments — confirm against the caller) and
     lacked the ``type:`` prefix, so type checkers ignored it.
     """
     # Single definition of the fallback interval (it was previously duplicated).
     default_interval = 10
     collection_interval = float(
         config.statement_metrics_config.get('collection_interval', default_interval))
     if collection_interval <= 0:
         # A zero/negative setting would produce an invalid rate limit.
         collection_interval = default_interval
     super(MySQLStatementMetrics, self).__init__(
         check,
         # collection_interval is already a float, so no extra cast is needed.
         rate_limit=1 / collection_interval,
         run_sync=is_affirmative(
             config.statement_metrics_config.get('run_sync', False)),
         enabled=is_affirmative(
             config.statement_metrics_config.get('enabled', True)),
         expected_db_exceptions=(pymysql.err.DatabaseError, ),
         min_collection_interval=config.min_collection_interval,
         dbms="mysql",
         job_name="statement-metrics",
         shutdown_callback=self._close_db_conn,
     )
     self._metric_collection_interval = collection_interval
     self._connection_args = connection_args
     # Opened lazily; torn down via self._close_db_conn.
     self._db = None
     self._config = config
     self.log = get_check_logger()
     # Snapshot of the previous run's rows, used to compute per-run deltas.
     self._state = StatementMetrics()
     self._obfuscate_options = to_native_string(
         json.dumps(self._config.obfuscator_options))
     # full_statement_text_cache: limit the ingestion rate of full statement text events per query_signature
     self._full_statement_text_cache = TTLCache(
         maxsize=self._config.full_statement_text_cache_max_size,
         ttl=60 * 60 / self._config.full_statement_text_samples_per_hour_per_query,
     )  # type: TTLCache
Example #3
0
 def __init__(self, check):
     """Initialize the SQL Server statement-metrics job from the check's configuration."""
     self.check = check
     self.log = check.log
     interval = float(check.statement_metrics_config.get('collection_interval', DEFAULT_COLLECTION_INTERVAL))
     # A zero or negative setting falls back to the default so the rate limit stays valid.
     interval = DEFAULT_COLLECTION_INTERVAL if interval <= 0 else interval
     self.collection_interval = interval
     super(SqlserverStatementMetrics, self).__init__(
         check,
         run_sync=is_affirmative(check.statement_metrics_config.get('run_sync', False)),
         enabled=is_affirmative(check.statement_metrics_config.get('enabled', True)),
         expected_db_exceptions=(),
         min_collection_interval=check.min_collection_interval,
         dbms="sqlserver",
         rate_limit=1 / float(interval),
         job_name="query-metrics",
         shutdown_callback=self._close_db_conn,
     )
     self.disable_secondary_tags = is_affirmative(
         check.statement_metrics_config.get('disable_secondary_tags', False))
     self.dm_exec_query_stats_row_limit = int(
         check.statement_metrics_config.get('dm_exec_query_stats_row_limit', 10000))
     self.enforce_collection_interval_deadline = is_affirmative(
         check.statement_metrics_config.get('enforce_collection_interval_deadline', True))
     # Snapshot of the previous run's rows, used to compute per-run deltas.
     self._state = StatementMetrics()
     self._init_caches()
     self._conn_key_prefix = "dbm-"
     # Populated later; None marks "not yet built / not yet run".
     self._statement_metrics_query = None
     self._last_stats_query_time = None
Example #4
0
 def __init__(self, check, config, shutdown_callback):
     """Initialize the Postgres statement-metrics job."""
     interval = float(config.statement_metrics_config.get('collection_interval', DEFAULT_COLLECTION_INTERVAL))
     # Fall back to the default when the configured value is zero or negative.
     interval = DEFAULT_COLLECTION_INTERVAL if interval <= 0 else interval
     super(PostgresStatementMetrics, self).__init__(
         check,
         run_sync=is_affirmative(config.statement_metrics_config.get('run_sync', False)),
         enabled=is_affirmative(config.statement_metrics_config.get('enabled', True)),
         expected_db_exceptions=(psycopg2.errors.DatabaseError,),
         min_collection_interval=config.min_collection_interval,
         dbms="postgres",
         rate_limit=1 / float(interval),
         job_name="query-metrics",
         shutdown_callback=shutdown_callback,
     )
     self._metrics_collection_interval = interval
     self._config = config
     # Snapshot of the previous run's rows, used to compute per-run deltas.
     self._state = StatementMetrics()
     # Cache of statement-stats column names — presumably filled on first query; confirm.
     self._stat_column_cache = []
     self._obfuscate_options = to_native_string(json.dumps(self._config.obfuscator_options))
     # full_statement_text_cache: limit the ingestion rate of full statement text events per query_signature
     self._full_statement_text_cache = TTLCache(
         maxsize=config.full_statement_text_cache_max_size,
         ttl=60 * 60 / config.full_statement_text_samples_per_hour_per_query,
     )
Example #5
0
 def __init__(self, check, config):
     # type: (MySql, MySQLConfig) -> None
     """Hold per-check state used for MySQL statement-metrics collection."""
     self._check = check
     self._config = config
     # Resolved lazily; None until the DB hostname is first looked up — TODO confirm with caller.
     self._db_hostname = None
     self.log = get_check_logger()
     # Snapshot of the previous run's rows, used to compute per-run deltas.
     self._state = StatementMetrics()
     # full_statement_text_cache: limit the ingestion rate of full statement text events per query_signature
     self._full_statement_text_cache = TTLCache(
         maxsize=self._config.full_statement_text_cache_max_size,
         ttl=60 * 60 /
         self._config.full_statement_text_samples_per_hour_per_query,
     )  # type: TTLCache
Example #6
0
 def __init__(self, config):
     """Hold configuration and derivative-tracking state for statement metrics."""
     self.config = config
     self.log = get_check_logger()
     # Snapshot of the previous run's rows, used to compute per-run deltas.
     self._state = StatementMetrics()
    def test_compute_derivative_rows_happy_path(self):
        """First run only primes the tracker; later runs emit per-key deltas for changed rows."""
        sm = StatementMetrics()

        # Initial snapshot: counters ('count', 'time', 'errors') are cumulative per query.
        rows1 = [
            {
                'count': 13,
                'time': 2005,
                'errors': 1,
                'query': 'COMMIT',
                'db': 'puppies',
                'user': '******'
            },
            {
                'count': 25,
                'time': 105,
                'errors': 0,
                'query': 'ROLLBACK',
                'db': 'puppies',
                'user': '******'
            },
            {
                'count': 1,
                'time': 10005,
                'errors': 0,
                'query': 'select * from kennel',
                'db': 'puppies',
                'user': '******'
            },
            {
                'count': 99991882777665555,
                'time': 10005,
                'errors': 0,
                'query': 'update kennel set breed="dalmatian" where id = ?',
                'db': 'puppies',
                'user': '******',
            },
        ]

        def key(row):
            return (row['query'], row['db'], row['user'])

        # Only 'count' and 'time' are tracked; 'errors' is deliberately excluded.
        metrics = ['count', 'time']

        # First call has no baseline, so nothing can be emitted yet.
        assert [] == sm.compute_derivative_rows(rows1, metrics, key=key)
        # No changes should produce no rows
        assert [] == sm.compute_derivative_rows(rows1, metrics, key=key)

        # Second snapshot: one brand-new query plus increments on the original rows.
        rows2 = [
            {
                'count': 1,
                'time': 1,
                'errors': 1,
                'query': 'SELECT CURRENT_TIME',
                'db': 'puppies',
                'user': '******'
            },
            add_to_dict(rows1[0], {
                'count': 0,
                'time': 0,
                'errors': 15
            }),
            add_to_dict(rows1[1], {
                'count': 1,
                'time': 15,
                'errors': 0
            }),
            add_to_dict(rows1[2], {
                'count': 20,
                'time': 900,
                'errors': 0
            }),
            add_to_dict(rows1[3], {
                'count': 7,
                'time': 0.5,
                'errors': 0
            }),
        ]
        # Note: the newly-seen 'SELECT CURRENT_TIME' row is absent below — a row
        # with no baseline from the previous run is not emitted.
        expected = [
            # First row only incremented 'errors' which is not a tracked metric, so it is omitted from the output
            {
                'count': 1,
                'time': 15,
                'errors': 0,
                'query': 'ROLLBACK',
                'db': 'puppies',
                'user': '******'
            },
            {
                'count': 20,
                'time': 900,
                'errors': 0,
                'query': 'select * from kennel',
                'db': 'puppies',
                'user': '******'
            },
            {
                'count': 7,
                'time': 0.5,
                'errors': 0,
                'query': 'update kennel set breed="dalmatian" where id = ?',
                'db': 'puppies',
                'user': '******',
            },
        ]
        assert expected == sm.compute_derivative_rows(rows2, metrics, key=key)
        # No changes should produce no rows
        assert [] == sm.compute_derivative_rows(rows2, metrics, key=key)
 def test_compute_derivative_rows_boundary_cases(self, fn_args):
     """Boundary inputs must not raise on either the priming run or a repeat run."""
     sm = StatementMetrics()
     # Call twice: the first run primes internal state, the second exercises
     # the derivative path against that state.
     for _ in range(2):
         sm.compute_derivative_rows(*fn_args)
    def test_compute_derivative_rows_stats_reset(self):
        """A decreasing tracked metric is treated as a stats reset for that run."""
        sm = StatementMetrics()

        def key(row):
            return (row['query'], row['db'], row['user'])

        metrics = ['count', 'time']

        # Initial cumulative snapshot for two queries.
        rows1 = [
            {
                'count': 13,
                'time': 2005,
                'errors': 1,
                'query': 'COMMIT',
                'db': 'puppies',
                'user': '******'
            },
            {
                'count': 25,
                'time': 105,
                'errors': 0,
                'query': 'ROLLBACK',
                'db': 'puppies',
                'user': '******'
            },
        ]
        # Both rows increase normally -> both should be emitted.
        rows2 = [
            add_to_dict(rows1[0], {
                'count': 0,
                'time': 1,
                'errors': 15
            }),
            add_to_dict(rows1[1], {
                'count': 1,
                'time': 15,
                'errors': 0
            }),
        ]
        # Simulate a stats reset by decreasing one of the metrics rather than increasing
        # (the rows are also listed in swapped order; matching is by key, not position).
        rows3 = [
            add_to_dict(rows2[1], {
                'count': 1,
                'time': 15,
                'errors': 0
            }),
            add_to_dict(rows2[0], {
                'count': -1,
                'time': 0,
                'errors': 15
            }),
        ]
        # Normal increments again after the reset.
        rows4 = [
            add_to_dict(rows3[1], {
                'count': 1,
                'time': 1,
                'errors': 0
            }),
            add_to_dict(rows3[0], {
                'count': 1,
                'time': 1,
                'errors': 1
            }),
        ]
        assert [] == sm.compute_derivative_rows(rows1, metrics, key=key)
        assert 2 == len(sm.compute_derivative_rows(rows2, metrics, key=key))
        # The [] here shows a single decreasing row invalidates the whole batch for that run.
        assert [] == sm.compute_derivative_rows(rows3, metrics, key=key)
        assert 2 == len(sm.compute_derivative_rows(rows4, metrics, key=key))
Example #10
0
 def __init__(self, config):
     # type: (MySQLConfig) -> None
     """Hold configuration and derivative-tracking state for statement metrics."""
     self.config = config
     self.log = get_check_logger()
     # Snapshot of the previous run's rows, used to compute per-run deltas.
     self._state = StatementMetrics()
    def test_compute_derivative_rows_with_duplicates(self):
        """Rows that collide on the same key have their metric deltas merged into one output row."""
        sm = StatementMetrics()

        def key(row):
            # Both rows below share 'sig1', so they collide on this key.
            return (row['query_signature'], row['db'], row['user'])

        metric_columns = ['count', 'time']

        rows1 = [
            {
                'count': 13,
                'time': 2005,
                'errors': 1,
                'query': 'SELECT * FROM table1 where id = ANY(?)',
                'query_signature': 'sig1',
                'db': 'puppies',
                'user': '******',
            },
            {
                'count': 25,
                'time': 105,
                'errors': 0,
                'query': 'SELECT * FROM table1 where id = ANY(?, ?)',
                'query_signature': 'sig1',
                'db': 'puppies',
                'user': '******',
            },
        ]

        rows2 = [
            {
                'count': 14,
                'time': 2006,
                'errors': 32,
                'query': 'SELECT * FROM table1 where id = ANY(?)',
                'query_signature': 'sig1',
                'db': 'puppies',
                'user': '******',
            },
            {
                'count': 26,
                'time': 125,
                'errors': 1,
                'query': 'SELECT * FROM table1 where id = ANY(?, ?)',
                'query_signature': 'sig1',
                'db': 'puppies',
                'user': '******',
            },
        ]

        # Run a first check to initialize tracking
        sm.compute_derivative_rows(rows1, metric_columns, key=key)
        # Run the check again to compute the metrics. The result gets its own
        # name: the original code rebound 'metrics' here, shadowing the
        # metric-column list passed in the same call.
        derived_rows = sm.compute_derivative_rows(rows2, metric_columns, key=key)

        # Deltas are summed across the colliding rows: count (14-13)+(26-25)=2,
        # time (2006-2005)+(125-105)=21. Non-metric fields ('errors', 'query')
        # come from the first of the merged rows per this expectation.
        expected_merged_metrics = [{
            'count': 2,
            'time': 21,
            'errors': 32,
            'db': 'puppies',
            'query': 'SELECT * FROM table1 where id = ANY(?)',
            'query_signature': 'sig1',
            'user': '******',
        }]

        assert expected_merged_metrics == derived_rows