def test_warehouse_load(dd_run_check, aggregator, instance):
    # type: (Callable[[SnowflakeCheck], None], AggregatorStub, Dict[str, Any]) -> None
    expected_wl_metrics = [
        ('COMPUTE_WH', Decimal('0.000446667'), Decimal('0E-9'), Decimal('0E-9'), Decimal('0E-9')),
    ]
    expected_tags = EXPECTED_TAGS + ['warehouse:COMPUTE_WH']
    with mock.patch('datadog_checks.snowflake.SnowflakeCheck.execute_query_raw', return_value=expected_wl_metrics):
        check = SnowflakeCheck(CHECK_NAME, {}, [instance])
        check._conn = mock.MagicMock()
        check._query_manager.queries = [Query(queries.WarehouseLoad)]
        dd_run_check(check)

    aggregator.assert_metric('snowflake.query.executed', value=0.000446667, tags=expected_tags)
    aggregator.assert_metric('snowflake.query.queued_overload', value=0, count=1, tags=expected_tags)
    aggregator.assert_metric('snowflake.query.queued_provision', value=0, count=1, tags=expected_tags)
    aggregator.assert_metric('snowflake.query.blocked', value=0, count=1, tags=expected_tags)
def test_process_metrics(aggregator, check):
    con = mock.MagicMock()
    cur = mock.MagicMock()
    con.cursor.return_value = cur

    metrics = copy.deepcopy(queries.ProcessMetrics['columns'][1:])
    programs = [
        "PSEUDO",
        "[email protected] (PMON)",
        "[email protected] (PSP0)",
        "[email protected] (VKTM)",
    ]
    cur.fetchall.return_value = [[program] + ([0] * len(metrics)) for program in programs]

    check._cached_connection = con
    check._query_manager.queries = [Query(queries.ProcessMetrics)]
    check._query_manager.tags = ['custom_tag']
    check._query_manager.compile_queries()
    check._query_manager.execute()

    for i, metric in enumerate(metrics):
        expected_program = programs[i]
        aggregator.assert_metric(
            'oracle.{}'.format(metric['name']),
            count=1,
            value=0,
            tags=['custom_tag', 'program:{}'.format(expected_program)],
        )
def test_login_metrics(dd_run_check, aggregator, instance):
    # type: (Callable[[SnowflakeCheck], None], AggregatorStub, Dict[str, Any]) -> None
    expected_login_metrics = [('SNOWFLAKE_UI', 2, 6, 8), ('PYTHON_DRIVER', 0, 148, 148)]
    with mock.patch('datadog_checks.snowflake.SnowflakeCheck.execute_query_raw', return_value=expected_login_metrics):
        check = SnowflakeCheck(CHECK_NAME, {}, [instance])
        check._conn = mock.MagicMock()
        check._query_manager.queries = [Query(queries.LoginMetrics)]
        dd_run_check(check)

    snowflake_tags = EXPECTED_TAGS + ['client_type:SNOWFLAKE_UI']
    aggregator.assert_metric('snowflake.logins.fail.count', value=2, tags=snowflake_tags)
    aggregator.assert_metric('snowflake.logins.success.count', value=6, tags=snowflake_tags)
    aggregator.assert_metric('snowflake.logins.total', value=8, tags=snowflake_tags)

    python_tags = EXPECTED_TAGS + ['client_type:PYTHON_DRIVER']
    aggregator.assert_metric('snowflake.logins.fail.count', value=0, tags=python_tags)
    aggregator.assert_metric('snowflake.logins.success.count', value=148, tags=python_tags)
    aggregator.assert_metric('snowflake.logins.total', value=148, tags=python_tags)
def create_query_manager(*args, **kwargs):
    executor = kwargs.pop('executor', None)
    if executor is None:
        executor = mock_executor()

    check = kwargs.pop('check', None) or AgentCheck('test', {}, [{}])

    return QueryManager(check, executor, [Query(arg) for arg in args], **kwargs)
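# Hedged usage sketch for the helper above: the query dict, rows, metric name, and tag
# below are illustrative only (not from a real integration), and this assumes `mock_executor`
# accepts the canned rows it should return for any query, as it does in these tests.
def test_create_query_manager_usage_sketch(aggregator):
    query_manager = create_query_manager(
        {
            'name': 'example',
            'query': 'SELECT tag, value FROM example',
            'columns': [{'name': 'test', 'type': 'tag'}, {'name': 'example.metric', 'type': 'gauge'}],
        },
        executor=mock_executor([('foo', 1)]),
    )
    query_manager.compile_queries()
    query_manager.execute()

    aggregator.assert_metric('example.metric', value=1, count=1, tags=['test:foo'])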
def test_query_metrics(dd_run_check, aggregator, instance):
    # type: (Callable[[SnowflakeCheck], None], AggregatorStub, Dict[str, Any]) -> None
    expected_query_metrics = [
        (
            'USE',
            'COMPUTE_WH',
            'SNOWFLAKE',
            None,
            Decimal('4.333333'),
            Decimal('24.555556'),
            Decimal('0.000000'),
            Decimal('0.000000'),
            Decimal('0.000000'),
            Decimal('0.000000'),
            Decimal('0.000000'),
        ),
    ]

    expected_tags = EXPECTED_TAGS + ['warehouse:COMPUTE_WH', 'database:SNOWFLAKE', 'schema:None', 'query_type:USE']
    with mock.patch('datadog_checks.snowflake.SnowflakeCheck.execute_query_raw', return_value=expected_query_metrics):
        check = SnowflakeCheck(CHECK_NAME, {}, [instance])
        check._conn = mock.MagicMock()
        check._query_manager.queries = [Query(queries.QueryHistory)]
        dd_run_check(check)

    aggregator.assert_metric('snowflake.query.execution_time', value=4.333333, tags=expected_tags)
    aggregator.assert_metric('snowflake.query.compilation_time', value=24.555556, tags=expected_tags)
    aggregator.assert_metric('snowflake.query.bytes_scanned', value=0, count=1, tags=expected_tags)
    aggregator.assert_metric('snowflake.query.bytes_written', value=0, count=1, tags=expected_tags)
    aggregator.assert_metric('snowflake.query.bytes_deleted', value=0, count=1, tags=expected_tags)
    aggregator.assert_metric('snowflake.query.bytes_spilled.local', value=0, count=1, tags=expected_tags)
    aggregator.assert_metric('snowflake.query.bytes_spilled.remote', value=0, count=1, tags=expected_tags)
def test_sys_metrics(aggregator, check):
    con = mock.MagicMock()
    cur = mock.MagicMock()
    con.cursor.return_value = cur

    metrics = copy.deepcopy(queries.SystemMetrics['columns'][1]['items'])
    cur.fetchall.return_value = zip([0] * len(metrics.keys()), metrics.keys())

    check._cached_connection = con
    check._query_manager.queries = [Query(queries.SystemMetrics)]
    check._query_manager.tags = ['custom_tag']
    check._query_manager.compile_queries()
    check._query_manager.execute()

    for metric in metrics.values():
        aggregator.assert_metric('oracle.{}'.format(metric['name']), count=1, value=0, tags=['custom_tag'])
def test_db_storage_metrics(dd_run_check, aggregator, instance):
    # type: (Callable[[SnowflakeCheck], None], AggregatorStub, Dict[str, Any]) -> None
    expected_db_storage_usage = [('SNOWFLAKE_DB', Decimal('133.000000'), Decimal('9.100000'))]
    expected_tags = EXPECTED_TAGS + ['database:SNOWFLAKE_DB']
    with mock.patch('datadog_checks.snowflake.SnowflakeCheck.execute_query_raw', return_value=expected_db_storage_usage):
        check = SnowflakeCheck(CHECK_NAME, {}, [instance])
        check._conn = mock.MagicMock()
        check._query_manager.queries = [Query(queries.DatabaseStorageMetrics)]
        dd_run_check(check)

    aggregator.assert_metric('snowflake.storage.database.storage_bytes', value=133.0, tags=expected_tags)
    aggregator.assert_metric('snowflake.storage.database.failsafe_bytes', value=9.1, tags=expected_tags)
def test_warehouse_usage_metrics(dd_run_check, aggregator, instance):
    # type: (Callable[[SnowflakeCheck], None], AggregatorStub, Dict[str, Any]) -> None
    expected_wh_usage = [
        (
            'COMPUTE_WH',
            Decimal('13.000000000'),
            Decimal('1.000000000'),
            Decimal('0.870148056'),
            Decimal('0.066934465846'),
            Decimal('13.870148056'),
            Decimal('1.066934465846'),
        )
    ]
    expected_tags = EXPECTED_TAGS + ['warehouse:COMPUTE_WH']
    with mock.patch('datadog_checks.snowflake.SnowflakeCheck.execute_query_raw', return_value=expected_wh_usage):
        check = SnowflakeCheck(CHECK_NAME, {}, [instance])
        check._conn = mock.MagicMock()
        check._query_manager.queries = [Query(queries.WarehouseCreditUsage)]
        dd_run_check(check)

    aggregator.assert_metric('snowflake.billing.warehouse.cloud_service.avg', count=1, tags=expected_tags)
    aggregator.assert_metric('snowflake.billing.warehouse.total_credit.avg', count=1, tags=expected_tags)
    aggregator.assert_metric('snowflake.billing.warehouse.virtual_warehouse.avg', count=1, tags=expected_tags)
    aggregator.assert_metric('snowflake.billing.warehouse.cloud_service.sum', count=1, tags=expected_tags)
    aggregator.assert_metric('snowflake.billing.warehouse.total_credit.sum', count=1, tags=expected_tags)
    aggregator.assert_metric('snowflake.billing.warehouse.virtual_warehouse.sum', count=1, tags=expected_tags)
def __init__(self, name, init_config, instances):
    # type: (str, dict, list) -> None
    super(VoltDBCheck, self).__init__(name, init_config, instances)

    self._config = Config(cast(Instance, self.instance), debug=self.log.debug)
    self.register_secret(self._config.password)

    self._client = Client(
        url=self._config.url,
        http_get=self.http.get,
        username=self._config.username,
        password=self._config.password,
        password_hashed=self._config.password_hashed,
    )

    manager_queries = [
        queries.CPUMetrics,
        queries.MemoryMetrics,
        queries.SnapshotStatusMetrics,
        queries.CommandLogMetrics,
        queries.ProcedureMetrics,
        queries.LatencyMetrics,
        queries.GCMetrics,
        queries.IOStatsMetrics,
        queries.TableMetrics,
        queries.IndexMetrics,
    ]

    if BASE_PARSED_VERSION < pkg_resources.parse_version('15.0.0'):
        # On Agent < 7.24.0 we must pass `Query` objects instead of dicts.
        manager_queries = [Query(query) for query in manager_queries]  # type: ignore

    self._query_manager = QueryManager(
        self,
        self._execute_query_raw,
        queries=manager_queries,
        tags=self._config.tags,
    )
    self.check_initializations.append(self._query_manager.compile_queries)
def test_storage_metrics(dd_run_check, aggregator, instance):
    # type: (Callable[[SnowflakeCheck], None], AggregatorStub, Dict[str, Any]) -> None
    expected_storage = [(Decimal('0.000000'), Decimal('1206.000000'), Decimal('19.200000'))]
    with mock.patch('datadog_checks.snowflake.SnowflakeCheck.execute_query_raw', return_value=expected_storage):
        check = SnowflakeCheck(CHECK_NAME, {}, [instance])
        check._conn = mock.MagicMock()
        check._query_manager.queries = [Query(queries.StorageUsageMetrics)]
        dd_run_check(check)

    aggregator.assert_metric('snowflake.storage.storage_bytes.total', value=0.0, tags=EXPECTED_TAGS)
    aggregator.assert_metric('snowflake.storage.stage_bytes.total', value=1206.0, tags=EXPECTED_TAGS)
    aggregator.assert_metric('snowflake.storage.failsafe_bytes.total', value=19.2, tags=EXPECTED_TAGS)
def test_credit_usage_metrics(dd_run_check, aggregator, instance):
    # type: (Callable[[SnowflakeCheck], None], AggregatorStub, Dict[str, Any]) -> None
    expected_credit_usage = [
        (
            'WAREHOUSE_METERING',
            'COMPUTE_WH',
            Decimal('12.000000000'),
            Decimal('1.000000000'),
            Decimal('0.80397000'),
            Decimal('0.066997500000'),
            Decimal('12.803970000'),
            Decimal('1.066997500000'),
        )
    ]
    expected_tags = EXPECTED_TAGS + ['service_type:WAREHOUSE_METERING', 'service:COMPUTE_WH']
    with mock.patch('datadog_checks.snowflake.SnowflakeCheck.execute_query_raw', return_value=expected_credit_usage):
        check = SnowflakeCheck(CHECK_NAME, {}, [instance])
        check._conn = mock.MagicMock()
        check._query_manager.queries = [Query(queries.CreditUsage)]
        dd_run_check(check)

    aggregator.assert_metric('snowflake.billing.cloud_service.sum', count=1, tags=expected_tags)
    aggregator.assert_metric('snowflake.billing.cloud_service.avg', count=1, tags=expected_tags)
    aggregator.assert_metric('snowflake.billing.total_credit.sum', count=1)
    aggregator.assert_metric('snowflake.billing.total_credit.avg', count=1)
    aggregator.assert_metric('snowflake.billing.virtual_warehouse.sum', count=1)
    aggregator.assert_metric('snowflake.billing.virtual_warehouse.avg', count=1)
def test_tablespace_metrics(aggregator, check):
    con = mock.MagicMock()
    cur = mock.MagicMock()
    cur.fetchall.return_value = [
        ["offline", 0, 100, 0, 1],
        ["normal", 50, 100, 50, 0],
        ["full", 100, 100, 100, 0],
        ["size_0", 1, 0, 100, 0],
    ]
    con.cursor.return_value = cur

    check._cached_connection = con
    check._query_manager.queries = [Query(queries.TableSpaceMetrics)]
    check._query_manager.tags = ['custom_tag']
    check._query_manager.compile_queries()
    check._query_manager.execute()

    # Offline tablespace
    tags = ["custom_tag", "tablespace:offline"]
    aggregator.assert_metric("oracle.tablespace.used", value=0, count=1, tags=tags)
    aggregator.assert_metric("oracle.tablespace.size", value=100, count=1, tags=tags)
    aggregator.assert_metric("oracle.tablespace.in_use", value=0, count=1, tags=tags)
    aggregator.assert_metric("oracle.tablespace.offline", value=1, count=1, tags=tags)

    # Normal tablespace
    tags = ["custom_tag", "tablespace:normal"]
    aggregator.assert_metric("oracle.tablespace.used", value=50, count=1, tags=tags)
    aggregator.assert_metric("oracle.tablespace.size", value=100, count=1, tags=tags)
    aggregator.assert_metric("oracle.tablespace.in_use", value=50, count=1, tags=tags)
    aggregator.assert_metric("oracle.tablespace.offline", value=0, count=1, tags=tags)

    # Full tablespace
    tags = ["custom_tag", "tablespace:full"]
    aggregator.assert_metric("oracle.tablespace.used", value=100, count=1, tags=tags)
    aggregator.assert_metric("oracle.tablespace.size", value=100, count=1, tags=tags)
    aggregator.assert_metric("oracle.tablespace.in_use", value=100, count=1, tags=tags)
    aggregator.assert_metric("oracle.tablespace.offline", value=0, count=1, tags=tags)

    # Size-0 tablespace
    tags = ["custom_tag", "tablespace:size_0"]
    aggregator.assert_metric("oracle.tablespace.used", value=1, count=1, tags=tags)
    aggregator.assert_metric("oracle.tablespace.size", value=0, count=1, tags=tags)
    aggregator.assert_metric("oracle.tablespace.in_use", value=100, count=1, tags=tags)
    aggregator.assert_metric("oracle.tablespace.offline", value=0, count=1, tags=tags)
SystemMetrics = Query(
    {
        'name': 'system.metrics',
        'query': 'SELECT value, metric FROM system.metrics',
        'columns': [
            {'name': 'value', 'type': 'source'},
            {
                'name': 'metric',
                'type': 'match',
                'source': 'value',
                'items': {
                    'BackgroundMovePoolTask': {'name': 'background_pool.move.task.active', 'type': 'gauge'},
                    'BackgroundPoolTask': {'name': 'background_pool.processing.task.active', 'type': 'gauge'},
                    'BackgroundSchedulePoolTask': {'name': 'background_pool.schedule.task.active', 'type': 'gauge'},
                    'ContextLockWait': {'name': 'thread.lock.context.waiting', 'type': 'gauge'},
                    'DelayedInserts': {'name': 'query.insert.delayed', 'type': 'gauge'},
                    'DictCacheRequests': {'name': 'dictionary.request.cache', 'type': 'gauge'},
                    'DiskSpaceReservedForMerge': {'name': 'merge.disk.reserved', 'type': 'gauge'},
                    'DistributedFilesToInsert': {'name': 'table.distributed.file.insert.pending', 'type': 'gauge'},
                    'DistributedSend': {'name': 'table.distributed.connection.inserted', 'type': 'gauge'},
                    'EphemeralNode': {'name': 'zk.node.ephemeral', 'type': 'gauge'},
                    'GlobalThread': {'name': 'thread.global.total', 'type': 'gauge'},
                    'GlobalThreadActive': {'name': 'thread.global.active', 'type': 'gauge'},
                    'HTTPConnection': {'name': 'connection.http', 'type': 'gauge'},
                    'InterserverConnection': {'name': 'connection.interserver', 'type': 'gauge'},
                    'LeaderElection': {'name': 'replica.leader.election', 'type': 'gauge'},
                    'LeaderReplica': {'name': 'table.replicated.leader', 'type': 'gauge'},
                    'LocalThread': {'name': 'thread.local.total', 'type': 'gauge'},
                    'LocalThreadActive': {'name': 'thread.local.active', 'type': 'gauge'},
                    'MemoryTracking': {'name': 'query.memory', 'type': 'gauge'},
                    'MemoryTrackingForMerges': {'name': 'merge.memory', 'type': 'gauge'},
                    'MemoryTrackingInBackgroundMoveProcessingPool': {
                        'name': 'background_pool.move.memory',
                        'type': 'gauge',
                    },
                    'MemoryTrackingInBackgroundProcessingPool': {
                        'name': 'background_pool.processing.memory',
                        'type': 'gauge',
                    },
                    'MemoryTrackingInBackgroundSchedulePool': {
                        'name': 'background_pool.schedule.memory',
                        'type': 'gauge',
                    },
                    'Merge': {'name': 'merge.active', 'type': 'gauge'},
                    'OpenFileForRead': {'name': 'file.open.read', 'type': 'gauge'},
                    'OpenFileForWrite': {'name': 'file.open.write', 'type': 'gauge'},
                    'PartMutation': {'name': 'query.mutation', 'type': 'gauge'},
                    'Query': {'name': 'query.active', 'type': 'gauge'},
                    'QueryPreempted': {'name': 'query.waiting', 'type': 'gauge'},
                    'QueryThread': {'name': 'thread.query', 'type': 'gauge'},
                    'RWLockActiveReaders': {'name': 'thread.lock.rw.active.read', 'type': 'gauge'},
                    'RWLockActiveWriters': {'name': 'thread.lock.rw.active.write', 'type': 'gauge'},
                    'RWLockWaitingReaders': {'name': 'thread.lock.rw.waiting.read', 'type': 'gauge'},
                    'RWLockWaitingWriters': {'name': 'thread.lock.rw.waiting.write', 'type': 'gauge'},
                    'Read': {'name': 'syscall.read', 'type': 'gauge'},
                    'ReadonlyReplica': {'name': 'table.replicated.readonly', 'type': 'gauge'},
                    'ReplicatedChecks': {'name': 'table.replicated.part.check', 'type': 'gauge'},
                    'ReplicatedFetch': {'name': 'table.replicated.part.fetch', 'type': 'gauge'},
                    'ReplicatedSend': {'name': 'table.replicated.part.send', 'type': 'gauge'},
                    'SendExternalTables': {'name': 'connection.send.external', 'type': 'gauge'},
                    'SendScalars': {'name': 'connection.send.scalar', 'type': 'gauge'},
                    'StorageBufferBytes': {'name': 'table.buffer.size', 'type': 'gauge'},
                    'StorageBufferRows': {'name': 'table.buffer.row', 'type': 'gauge'},
                    'TCPConnection': {'name': 'connection.tcp', 'type': 'gauge'},
                    'Write': {'name': 'syscall.write', 'type': 'gauge'},
                    'ZooKeeperRequest': {'name': 'zk.request', 'type': 'gauge'},
                    'ZooKeeperSession': {'name': 'zk.connection', 'type': 'gauge'},
                    'ZooKeeperWatch': {'name': 'zk.watch', 'type': 'gauge'},
                },
            },
        ],
    }
)
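# A self-contained sketch of what the 'match' column type above does (hypothetical helper,
# not the datadog_checks implementation): each system.metrics row is (value, metric); the
# 'metric' column's value is looked up in 'items', and the matched spec is applied to the
# 'value' source column. Rows whose metric name has no entry in 'items' are skipped.
def match_row(row, items):
    value, metric = row
    spec = items.get(metric)
    if spec is None:
        return None  # unknown ClickHouse metric: nothing is submitted
    return spec['name'], spec['type'], float(value)


# e.g. match_row(('42', 'Query'), items) -> ('query.active', 'gauge', 42.0)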
STATS_MYSQL_GLOBAL = Query(
    {
        'name': 'stats_mysql_global',
        'query': 'SELECT * FROM stats_mysql_global',
        'columns': [
            {
                'name': 'Variable_Name',
                'type': 'match',
                'source': 'Variable_Value',
                'items': {
                    # the total uptime of ProxySQL in seconds
                    'ProxySQL_Uptime': {'name': 'uptime', 'type': 'gauge'},
                    # memory used by the embedded SQLite
                    'SQLite3_memory_bytes': {'name': 'memory.sqlite3_memory_bytes', 'type': 'gauge'},
                    # a count of how many client connections are currently processing a transaction
                    'Active_Transactions': {'name': 'active_transactions', 'type': 'gauge'},
                    # client failed connections (or closed improperly)
                    'Client_Connections_aborted': {'name': 'client.connections_aborted', 'type': 'rate'},
                    # client connections that are currently connected
                    'Client_Connections_connected': {'name': 'client.connections_connected', 'type': 'gauge'},
                    # total number of client connections created
                    'Client_Connections_created': {'name': 'client.connections_created', 'type': 'rate'},
                    # backend failed connections (or closed improperly)
                    'Server_Connections_aborted': {'name': 'server.connections_aborted', 'type': 'rate'},
                    # backend connections that are currently connected
                    'Server_Connections_connected': {'name': 'server.connections_connected', 'type': 'gauge'},
                    # total number of backend connections created
                    'Server_Connections_created': {'name': 'server.connections_created', 'type': 'rate'},
                    # number of client connections that are currently handled by the main worker threads. If ProxySQL
                    # isn't running with "--idle-threads", Client_Connections_non_idle is always equal to
                    # "Client_Connections_connected"
                    'Client_Connections_non_idle': {'name': 'client.connections_non_idle', 'type': 'gauge'},
                    # time spent making network calls to communicate with the backends
                    'Backend_query_time_nsec': {
                        'name': 'backend.query_time_pct',
                        'type': 'temporal_percent',
                        'scale': 'nanosecond',
                    },
                    # buffers related to backend connections if "fast_forward" is used, 0 means fast_forward is not used
                    'mysql_backend_buffers_bytes': {'name': 'mysql.backend_buffers_bytes', 'type': 'gauge'},
                    # buffers related to frontend connections (read/write buffers and other queues)
                    'mysql_frontend_buffers_bytes': {'name': 'mysql.frontend_buffers_bytes', 'type': 'gauge'},
                    # other memory used by ProxySQL to handle MySQL Sessions
                    'mysql_session_internal_bytes': {'name': 'mysql.session_internal_bytes', 'type': 'gauge'},
                    # number of MySQL Thread workers i.e. "mysql-threads"
                    'MySQL_Thread_Workers': {'name': 'mysql.thread_workers', 'type': 'gauge'},
                    # the number of monitor threads. By default it is twice the number of worker threads, initially
                    # capped to 16, yet more threads will be created if checks are being queued. Monitor threads
                    # perform blocking network operations and do not consume much CPU
                    'MySQL_Monitor_Workers': {'name': 'mysql.monitor_workers', 'type': 'gauge'},
                    # number of requests where a connection was already available in the connection pool
                    'ConnPool_get_conn_success': {'name': 'pool.conn_success', 'type': 'rate'},
                    # number of requests where a connection was not available in the connection pool and either: a new
                    # connection had to be created or no backend was available
                    'ConnPool_get_conn_failure': {'name': 'pool.conn_failure', 'type': 'rate'},
                    # number of connections that a MySQL Thread obtained from its own local connection pool cache.
                    # This value tends to be large only when there is high concurrency.
                    'ConnPool_get_conn_immediate': {'name': 'pool.conn_immediate', 'type': 'rate'},
                    # the total number of client requests / statements executed
                    'Questions': {'name': 'questions', 'type': 'rate'},
                    # the total number of queries with an execution time greater than mysql-long_query_time milliseconds
                    'Slow_queries': {'name': 'slow_queries', 'type': 'rate'},
                    # memory used by the connection pool to store connections metadata
                    'ConnPool_memory_bytes': {'name': 'pool.memory_bytes', 'type': 'gauge'},
                    # the total number of prepared statements that are in use by clients
                    'Stmt_Client_Active_Total': {'name': 'client.statements.active_total', 'type': 'gauge'},
                    # the number of unique prepared statements currently in use by clients
                    'Stmt_Client_Active_Unique': {'name': 'client.statements.active_unique', 'type': 'gauge'},
                    # the total number of prepared statements currently available across all backend connections
                    'Stmt_Server_Active_Total': {'name': 'server.statements.active_total', 'type': 'gauge'},
                    # the number of unique prepared statements currently available across all backend connections
                    'Stmt_Server_Active_Unique': {'name': 'server.statements.active_unique', 'type': 'gauge'},
                    # the number of global prepared statements for which ProxySQL has metadata
                    'Stmt_Cached': {'name': 'statements.cached', 'type': 'gauge'},
                    # memory currently used by the query cache
                    'Query_Cache_Memory_bytes': {'name': 'query_cache.memory_bytes', 'type': 'gauge'},
                    # number of entries currently stored in the query cache
                    'Query_Cache_Entries': {'name': 'query_cache.entries', 'type': 'gauge'},
                    # number of entries purged by the Query Cache due to TTL expiration
                    'Query_Cache_Purged': {'name': 'query_cache.purged', 'type': 'rate'},
                    # number of bytes sent into the Query Cache
                    'Query_Cache_bytes_IN': {'name': 'query_cache.bytes_in', 'type': 'rate'},
                    # number of bytes read from the Query Cache
                    'Query_Cache_bytes_OUT': {'name': 'query_cache.bytes_out', 'type': 'rate'},
                    # number of read requests
                    'Query_Cache_count_GET': {'name': 'query_cache.get.count', 'type': 'rate'},
                    # number of successful read requests
                    'Query_Cache_count_GET_OK': {'name': 'query_cache.get_ok.count', 'type': 'rate'},
                    # number of write requests
                    'Query_Cache_count_SET': {'name': 'query_cache.set.count', 'type': 'rate'},
                    # the time spent inside the Query Processor to determine what action needs to be taken with the
                    # query (internal module)
                    'Query_Processor_time_nsec': {
                        'name': 'query_processor_time_pct',
                        'type': 'temporal_percent',
                        'scale': 'nanosecond',
                    },
                },
            },
            {'name': 'Variable_Value', 'type': 'source'},
        ],
    }
)
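# A sketch of the arithmetic behind the 'temporal_percent' type used above (this mirrors,
# but does not import, total_time_to_temporal_percent from datadog_checks.base; the
# constant and helper names here are illustrative). A counter of nanoseconds spent working
# is converted to "seconds of work * 100" and submitted as a rate, so the per-second rate
# reads as percent utilization: 0.5e9 ns of work accumulated over one second -> 50(%).
NANOSECOND = 1e9


def to_temporal_percent(total_time_ns):
    return total_time_ns / NANOSECOND * 100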
from datadog_checks.base.utils.db import Query

ProcessMetrics = Query(
    {
        'name': 'process',
        'query': 'SELECT PROGRAM, PGA_USED_MEM, PGA_ALLOC_MEM, PGA_FREEABLE_MEM, PGA_MAX_MEM FROM GV$PROCESS',
        'columns': [
            {'name': 'program', 'type': 'tag'},
            {'name': 'process.pga_used_memory', 'type': 'gauge'},
            {'name': 'process.pga_allocated_memory', 'type': 'gauge'},
            {'name': 'process.pga_freeable_memory', 'type': 'gauge'},
            {'name': 'process.pga_maximum_memory', 'type': 'gauge'},
        ],
    }
)

SystemMetrics = Query({
# Licensed under a 3-clause BSD style license (see LICENSE)
from datadog_checks.base.utils.db import Query

# https://docs.snowflake.com/en/sql-reference/account-usage/storage_usage.html
StorageUsageMetrics = Query(
    {
        'name': 'storage.metrics',
        'query': 'SELECT STORAGE_BYTES, STAGE_BYTES, FAILSAFE_BYTES from STORAGE_USAGE ORDER BY USAGE_DATE DESC LIMIT 1;',
        'columns': [
            {'name': 'storage.storage_bytes.total', 'type': 'gauge'},
            {'name': 'storage.stage_bytes.total', 'type': 'gauge'},
            {'name': 'storage.failsafe_bytes.total', 'type': 'gauge'},
        ],
    }
)

# https://docs.snowflake.com/en/sql-reference/account-usage/database_storage_usage_history.html
DatabaseStorageMetrics = Query(
    {
        'name': 'database_storage.metrics',