def __init__(self, check, config):
    self._check = check
    self._db = None
    self._config = config
    self._log = get_check_logger()
    self._activity_last_query_start = None
    self._last_check_run = 0
    self._collection_loop_future = None
    self._cancel_event = threading.Event()
    self._tags = None
    self._tags_str = None
    self._service = "postgres"
    self._db_hostname = resolve_db_host(self._config.host)
    self._enabled = is_affirmative(self._config.statement_samples_config.get('enabled', False))
    self._run_sync = is_affirmative(self._config.statement_samples_config.get('run_sync', False))
    self._rate_limiter = ConstantRateLimiter(
        float(self._config.statement_samples_config.get('collections_per_second', 1))
    )
    self._explain_function = self._config.statement_samples_config.get(
        'explain_function', 'datadog.explain_statement'
    )
    # explained_statements_cache: limit how often we try to re-explain the same query
    self._explained_statements_cache = TTLCache(
        maxsize=int(self._config.statement_samples_config.get('explained_statements_cache_maxsize', 5000)),
        ttl=60 * 60 / int(self._config.statement_samples_config.get('explained_statements_per_hour_per_query', 60)),
    )
    # seen_samples_cache: limit the ingestion rate per (query_signature, plan_signature)
    self._seen_samples_cache = TTLCache(
        # assuming ~100 bytes per entry (query & plan signature, key hash, 4 pointers (ordered dict), expiry time)
        # total size: 10k * 100 = 1 MB
        maxsize=int(self._config.statement_samples_config.get('seen_samples_cache_maxsize', 10000)),
        ttl=60 * 60 / int(self._config.statement_samples_config.get('samples_per_hour_per_query', 15)),
    )
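# A minimal sketch of how the seen_samples_cache above rate-limits ingestion,
# assuming TTLCache comes from cachetools; should_ingest and the signature
# values are hypothetical illustrations, not part of the check itself.
from cachetools import TTLCache

_SAMPLES_PER_HOUR_PER_QUERY = 15
_seen = TTLCache(maxsize=10000, ttl=60 * 60 / _SAMPLES_PER_HOUR_PER_QUERY)

def should_ingest(query_signature, plan_signature):
    # each (query, plan) pair is admitted once per TTL window; after the entry
    # expires (ttl seconds), the next matching sample is let through again
    key = (query_signature, plan_signature)
    if key in _seen:
        return False
    _seen[key] = True
    return True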
def resolved_hostname(self):
    if self._resolved_hostname is None:
        if self.reported_hostname:
            self._resolved_hostname = self.reported_hostname
        elif self.dbm_enabled:
            host, port = self.split_sqlserver_host_port(self.instance.get('host'))
            self._resolved_hostname = resolve_db_host(host)
        else:
            self._resolved_hostname = self.agent_hostname
    return self._resolved_hostname
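# The DBM branch above splits SQL Server's "host,port" address form before
# resolving. A hedged sketch of that split, assuming the comma convention;
# _split_host_port is a hypothetical stand-in for split_sqlserver_host_port.
def _split_host_port(host):
    if host and ',' in host:
        host, port = host.split(',', 1)
        return host.strip(), port.strip()
    return host, None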
def __init__(self, check, config, connection_args):
    self._check = check
    self._version_processed = False
    self._connection_args = connection_args
    # checkpoint at zero so we pull the whole history table on the first run
    self._checkpoint = 0
    self._log = get_check_logger()
    self._last_check_run = 0
    self._db = None
    self._tags = None
    self._tags_str = None
    self._service = "mysql"
    self._collection_loop_future = None
    self._cancel_event = threading.Event()
    self._rate_limiter = ConstantRateLimiter(1)
    self._config = config
    self._db_hostname = resolve_db_host(self._config.host)
    self._enabled = is_affirmative(self._config.statement_samples_config.get('enabled', False))
    self._run_sync = is_affirmative(self._config.statement_samples_config.get('run_sync', False))
    self._collections_per_second = self._config.statement_samples_config.get('collections_per_second', -1)
    self._events_statements_row_limit = self._config.statement_samples_config.get('events_statements_row_limit', 5000)
    self._explain_procedure = self._config.statement_samples_config.get('explain_procedure', 'explain_statement')
    self._fully_qualified_explain_procedure = self._config.statement_samples_config.get(
        'fully_qualified_explain_procedure', 'datadog.explain_statement'
    )
    self._events_statements_temp_table = self._config.statement_samples_config.get(
        'events_statements_temp_table_name', 'datadog.temp_events'
    )
    self._events_statements_enable_procedure = self._config.statement_samples_config.get(
        'events_statements_enable_procedure', 'datadog.enable_events_statements_consumers'
    )
    self._preferred_events_statements_tables = EVENTS_STATEMENTS_PREFERRED_TABLES
    self._has_window_functions = False
    events_statements_table = self._config.statement_samples_config.get('events_statements_table', None)
    if events_statements_table:
        if events_statements_table in DEFAULT_EVENTS_STATEMENTS_COLLECTIONS_PER_SECOND:
            self._log.debug("Configured preferred events_statements_table: %s", events_statements_table)
            self._preferred_events_statements_tables = [events_statements_table]
        else:
            self._log.warning(
                "Invalid events_statements_table: %s. Must be one of %s. Falling back to trying all tables.",
                events_statements_table,
                ', '.join(DEFAULT_EVENTS_STATEMENTS_COLLECTIONS_PER_SECOND.keys()),
            )
    self._explain_strategies = {
        'PROCEDURE': self._run_explain_procedure,
        'FQ_PROCEDURE': self._run_fully_qualified_explain_procedure,
        'STATEMENT': self._run_explain,
    }
    self._preferred_explain_strategies = ['PROCEDURE', 'FQ_PROCEDURE', 'STATEMENT']
    self._init_caches()
    self._statement_samples_client = _new_statement_samples_client()
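# A minimal sketch of the fallback implied by _preferred_explain_strategies
# above: try each strategy in order and keep the first plan that works.
# _explain_with_fallback and its error handling are hypothetical
# simplifications, not the check's actual method.
def _explain_with_fallback(self, statement):
    for name in self._preferred_explain_strategies:
        strategy = self._explain_strategies[name]
        try:
            plan = strategy(statement)
            if plan:
                return plan, name
        except Exception as e:
            self._log.debug("explain strategy %s failed: %s", name, e)
    return None, None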
def _db_hostname_cached(self):
    if self._db_hostname:
        return self._db_hostname
    self._db_hostname = resolve_db_host(self._config.host)
    return self._db_hostname
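# The method above is plain lazy initialization: resolve once, then reuse.
# The same pattern sketched as a read-only property (a hypothetical
# alternative, not the check's actual code):
@property
def db_hostname(self):
    if self._db_hostname is None:
        self._db_hostname = resolve_db_host(self._config.host)
    return self._db_hostname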
def test_resolve_db_host(db_host, agent_hostname, want):
    datadog_agent.set_hostname(agent_hostname)
    assert resolve_db_host(db_host) == want
    datadog_agent.reset_hostname()
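# The (db_host, agent_hostname, want) signature above suggests pytest
# parametrization. A hedged example of cases such a test might run; the
# behavior shown (loopback hosts resolving to the agent hostname, remote
# hosts passing through) is an assumption about resolve_db_host, and the
# fixtures are illustrative, not the repo's actual ones.
import pytest

@pytest.mark.parametrize(
    "db_host,agent_hostname,want",
    [
        ("localhost", "agent-host", "agent-host"),  # loopback resolves to the agent host (assumed)
        ("127.0.0.1", "agent-host", "agent-host"),
        ("db.example.com", "agent-host", "db.example.com"),  # remote host passes through
    ],
)
def test_resolve_db_host_examples(db_host, agent_hostname, want):
    datadog_agent.set_hostname(agent_hostname)
    assert resolve_db_host(db_host) == want
    datadog_agent.reset_hostname()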