# Example #1
def test_minimal_config(aggregator, instance_basic):
    """Run the check with a bare-bones instance and verify the service check and core metrics."""
    check = MySql(common.CHECK_NAME, {}, [instance_basic])
    check.check(instance_basic)

    # The connection service check must be OK with the minimal tag set.
    aggregator.assert_service_check(
        'mysql.can_connect', status=MySql.OK, tags=tags.SC_TAGS_MIN, count=1
    )

    # These metric families are always expected to be present.
    core_metrics = (
        variables.STATUS_VARS
        + variables.VARIABLES_VARS
        + variables.INNODB_VARS
        + variables.BINLOG_VARS
    )
    for metric_name in core_metrics:
        aggregator.assert_metric(metric_name, at_least=1)

    # These depend on server version/configuration and may be absent.
    maybe_metrics = (
        variables.COMPLEX_STATUS_VARS
        + variables.COMPLEX_VARIABLES_VARS
        + variables.COMPLEX_INNODB_VARS
        + variables.SYSTEM_METRICS
        + variables.SYNTHETIC_VARS
    )
    _test_optional_metrics(aggregator, maybe_metrics)

    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True)
# Example #2
def test_statement_samples_invalid_explain_procedure(aggregator, dbm_instance):
    """An invalid explain procedure name should surface an explain error metric."""
    dbm_instance['statement_samples']['run_sync'] = True
    dbm_instance['statement_samples']['explain_procedure'] = 'hello'
    check = MySql(common.CHECK_NAME, {}, instances=[dbm_instance])
    check.check(dbm_instance)
    # Collection must report the failure via an error-tagged internal metric.
    aggregator.assert_metric_has_tag_prefix("dd.mysql.statement_samples.error", "error:explain-")
# Example #3
def test_statement_samples_enable_consumers(
        dbm_instance, root_conn, events_statements_enable_procedure):
    """Verify the configured procedure re-enables disabled performance schema consumers."""
    dbm_instance['statement_samples']['run_sync'] = True
    dbm_instance['statement_samples'][
        'events_statements_enable_procedure'] = events_statements_enable_procedure
    check = MySql(common.CHECK_NAME, {}, instances=[dbm_instance])

    # deliberately disable one of the consumers
    with closing(root_conn.cursor()) as cursor:
        cursor.execute(
            "UPDATE performance_schema.setup_consumers SET enabled='NO'  WHERE name = "
            "'events_statements_history_long';")

    consumers_before = check._statement_samples._get_enabled_performance_schema_consumers()
    assert consumers_before == {'events_statements_current', 'events_statements_history'}

    check.check(dbm_instance)

    consumers_after = check._statement_samples._get_enabled_performance_schema_consumers()
    if events_statements_enable_procedure == "datadog.enable_events_statements_consumers":
        # The stock procedure should have turned the disabled consumer back on.
        assert consumers_after == consumers_before | {'events_statements_history_long'}
    else:
        # An unknown/missing procedure must leave the consumer set untouched.
        assert consumers_after == consumers_before
# Example #4
def test_version_metadata(instance_basic, datadog_agent, version_metadata):
    """The check must submit exactly the expected version metadata entries."""
    check = MySql(common.CHECK_NAME, {}, instances=[instance_basic])
    check.check_id = 'test:123'
    check.check(instance_basic)

    # All expected entries were submitted for this check id, and nothing extra.
    datadog_agent.assert_metadata('test:123', version_metadata)
    datadog_agent.assert_metadata_count(len(version_metadata))
def test__get_server_pid():
    """
    Test the logic looping through the processes searching for `mysqld`.

    A dummy child process is terminated mid-iteration to reproduce a past bug
    in the pid detection: the loop must tolerate processes disappearing while
    `psutil.process_iter` is being consumed.
    """
    import sys

    mysql_check = MySql(common.CHECK_NAME, {}, {})
    mysql_check._get_pid_file_variable = mock.MagicMock(return_value=None)
    mysql_check.log = mock.MagicMock()
    # Use the running interpreter rather than a bare "python", which may not
    # be on PATH (e.g. python3-only systems).
    dummy_proc = subprocess.Popen([sys.executable])

    p_iter = psutil.process_iter

    def process_iter():
        """
        Wrap `psutil.process_iter` with a func killing a running process
        while iterating to reproduce a bug in the pid detection.
        We don't use psutil directly here because at the time this will be
        invoked, `psutil.process_iter` will be mocked. Instead we assign it to
        `p_iter` which is then part of the closure (see line above).
        """
        for p in p_iter():
            if dummy_proc.pid == p.pid:
                dummy_proc.terminate()
                dummy_proc.wait()
            # continue as the original `process_iter` function
            yield p

    try:
        with mock.patch('datadog_checks.mysql.mysql.psutil.process_iter', process_iter):
            with mock.patch('datadog_checks.mysql.mysql.PROC_NAME', 'this_shouldnt_exist'):
                # the pid should be none but without errors
                assert mysql_check._get_server_pid(None) is None
                assert mysql_check.log.exception.call_count == 0
    finally:
        # Never leak the dummy process, even if an assertion fails before
        # the wrapped iterator gets a chance to kill it.
        if dummy_proc.poll() is None:
            dummy_proc.kill()
            dummy_proc.wait()
# Example #6
def test_statement_samples_loop_inactive_stop(aggregator, dbm_instance):
    """The sample collection loop must shut itself down once the check stops running."""
    check = MySql(common.CHECK_NAME, {}, instances=[dbm_instance])
    check.check(dbm_instance)
    # Resolving the future waits for the loop to exit and re-raises any
    # unhandled exception from the collection thread.
    check._statement_samples._collection_loop_future.result()
    aggregator.assert_metric("dd.mysql.statement_samples.collection_loop_inactive_stop")
# Example #7
def test_complex_config(aggregator, instance_complex):
    """Exercise the check with the full-featured instance configuration."""
    check = MySql(common.CHECK_NAME, {}, instances=[instance_complex])
    check.check(instance_complex)

    _assert_complex_config(aggregator)
    # Custom-query metrics and statement metrics have no metadata entries.
    excluded = ['alice.age', 'bob.age'] + variables.STATEMENT_VARS
    aggregator.assert_metrics_using_metadata(
        get_metadata_metrics(), check_submission_type=True, exclude=excluded
    )
# Example #8
def test_service_check(disable_generic_tags, expected_tags, hostname):
    """Service check tags should honor the `disable_generic_tags` option."""
    instance = {
        'server': 'localhost',
        'user': '******',
        'disable_generic_tags': disable_generic_tags,
    }
    mysql_check = MySql(common.CHECK_NAME, {}, instances=[instance])

    # Order does not matter, only the tag set.
    assert set(mysql_check._service_check_tags(hostname)) == expected_tags
# Example #9
def test_connection_with_host():
    """A bare `host` should produce direct TCP connection args with defaults.

    NOTE(review): the expected `passwd` previously disagreed with the
    instance's `pass` value; the assertion now mirrors the configured value.
    """
    file_instance = {
        'host': 'localhost',
        'user': '******',
        'pass': '******',
    }
    check = MySql(common.CHECK_NAME, {}, [file_instance])
    connection_args = check._get_connection_args()
    # `pass` from the instance is forwarded as pymysql's `passwd`.
    assert connection_args == {
        'ssl': None,
        'connect_timeout': 10,
        'user': '******',
        'passwd': '******',
        'host': 'localhost',
    }
# Example #10
def test_replication_check_status(
    replica_io_running,
    replica_sql_running,
    source_host,
    slaves_connected,
    check_status_repl,
    check_status_source,
    instance_basic,
    aggregator,
):
    """Replication service checks for replica and source roles, legacy and new names."""
    mysql_check = MySql(common.CHECK_NAME, {}, instances=[instance_basic])
    mysql_check.service_check_tags = ['foo:bar']

    # Build the replication status payload the check will inspect; the
    # IO/SQL thread keys vary by server version, hence the (key, value) pairs.
    replication_results = {'Slaves_connected': slaves_connected, 'Binlog_enabled': True}
    for key, value in (replica_io_running, replica_sql_running):
        if value is not None:
            replication_results[key] = value
    if source_host:
        replication_results['Master_Host'] = source_host

    mysql_check._check_replication_status(replication_results)

    # Each applicable role emits the service check under both the legacy and
    # the current name, tagged with its replication mode.
    expected_count = 0
    role_expectations = (
        (check_status_repl, ['foo:bar', 'replication_mode:replica']),
        (check_status_source, ['foo:bar', 'replication_mode:source']),
    )
    for status, role_tags in role_expectations:
        if status is None:
            continue
        aggregator.assert_service_check(
            'mysql.replication.slave_running', status, tags=role_tags, count=1
        )
        aggregator.assert_service_check(
            'mysql.replication.replica_running', status, tags=role_tags, count=1
        )
        expected_count += 1

    assert len(aggregator.service_checks('mysql.replication.slave_running')) == expected_count
# Example #11
def test_statement_samples_max_per_digest(dbm_instance):
    """Each query digest should be read from the events table at most once per run."""
    # clear out any events from previous test runs
    dbm_instance['statement_samples']['events_statements_table'] = 'events_statements_history_long'
    check = MySql(common.CHECK_NAME, {}, instances=[dbm_instance])
    for _ in range(3):
        check.check(dbm_instance)
    rows = check._statement_samples._get_new_events_statements('events_statements_history_long', 1000)
    digest_counts = Counter(row['digest'] for row in rows)
    assert all(
        count == 1 for count in digest_counts.values()
    ), "we should be reading exactly one row per digest out of the database"
# Example #12
def test_connection_with_defaults_file():
    """When `defaults_file` is set, connection details are delegated to the file."""
    instance = {
        'host': 'localhost',
        'port': '123',
        'user': '******',
        'defaults_file': '/foo/bar',
    }
    check = MySql(common.CHECK_NAME, {}, [instance])
    connection_args = check._get_connection_args()
    expected = {'ssl': None, 'connect_timeout': 10, 'read_default_file': '/foo/bar'}
    assert connection_args == expected
    # The defaults file takes precedence: no explicit host is passed through.
    assert 'host' not in connection_args
def test_connection_failure(aggregator, instance_error):
    """
    Service check reports connection failure
    """
    # NOTE: the stray extra positional `{}` argument was removed — it would
    # collide with the `instances` keyword, and every other constructor call
    # in this file uses MySql(name, init_config, instances=[...]).
    mysql_check = MySql(common.CHECK_NAME, {}, instances=[instance_error])

    with pytest.raises(Exception):
        mysql_check.check(instance_error)

    aggregator.assert_service_check('mysql.can_connect', status=MySql.CRITICAL, tags=tags.SC_FAILURE_TAGS, count=1)

    aggregator.assert_all_metrics_covered()
def test_version_metadata(instance_basic, version_metadata):
    """Version metadata must be submitted exactly once per entry via the agent stub."""
    check = MySql(common.CHECK_NAME, {}, instances=[instance_basic])
    check.check_id = 'test:123'

    with mock.patch('datadog_checks.base.stubs.datadog_agent.set_check_metadata') as set_metadata:
        check.check(instance_basic)

        # Every expected entry was submitted under this check's id...
        for key, expected_value in version_metadata.items():
            set_metadata.assert_any_call('test:123', key, expected_value)

        # ...and nothing beyond the expected entries was submitted.
        assert set_metadata.call_count == len(version_metadata)
def test_async_job_enabled(dd_run_check, dbm_instance, activity_enabled):
    """The activity job loop should exist iff query activity collection is enabled."""
    dbm_instance['query_activity'] = {'enabled': activity_enabled, 'run_sync': False}
    check = MySql(CHECK_NAME, {}, [dbm_instance])
    dd_run_check(check)
    check.cancel()
    if not activity_enabled:
        assert check._query_activity._job_loop_future is None
    else:
        assert check._query_activity._job_loop_future is not None
        # Resolving the future re-raises any unhandled exception from the loop.
        check._query_activity._job_loop_future.result()
def test_minimal_config(aggregator, spin_up_mysql):
    """Smoke-test the check against a live server with the minimal configuration."""
    mysql_check = MySql(common.CHECK_NAME, {}, {})
    mysql_check.check(common_config.MYSQL_MINIMAL_CONFIG)

    # The connection service check must come back OK with the minimal tags.
    aggregator.assert_service_check(
        'mysql.can_connect', status=MySql.OK, tags=tags.SC_TAGS_MIN, count=1
    )

    # With `at_least=0` these assertions validate submission type, not presence.
    metric_names = (
        variables.STATUS_VARS
        + variables.VARIABLES_VARS
        + variables.INNODB_VARS
        + variables.BINLOG_VARS
        + variables.SYSTEM_METRICS
        + variables.SYNTHETIC_VARS
    )
    for metric_name in metric_names:
        aggregator.assert_metric(metric_name, at_least=0)
# Example #17
def test_connection_with_charset(instance_basic):
    """An explicit `charset` option must be forwarded to the connection args."""
    instance = copy.deepcopy(instance_basic)
    instance['charset'] = 'utf8mb4'
    check = MySql(common.CHECK_NAME, {}, [instance])

    expected = {
        'host': common.HOST,
        'user': common.USER,
        'passwd': common.PASS,
        'port': common.PORT,
        'ssl': None,
        'connect_timeout': 10,
        'charset': 'utf8mb4',
    }
    assert check._get_connection_args() == expected
def test_async_job_cancel(aggregator, dd_run_check, dbm_instance):
    """Cancelling the check must stop the async activity job cleanly."""
    dbm_instance['query_activity']['run_sync'] = False
    check = MySql(CHECK_NAME, {}, [dbm_instance])
    dd_run_check(check)
    check.cancel()
    # wait for it to stop and make sure it doesn't throw any exceptions
    check._query_activity._job_loop_future.result()
    assert not check._query_activity._job_loop_future.running(), "activity thread should be stopped"
    # if the thread doesn't start until after the cancel signal is set then the db connection will never
    # be created in the first place
    aggregator.assert_metric(
        "dd.mysql.async_job.cancel", tags=_expected_dbm_instance_tags(dbm_instance)
    )
def test_activity_collection_rate_limit(aggregator, dd_run_check, dbm_instance):
    """The activity collection loop must respect its configured collection interval."""
    aggregator.reset()
    interval = 0.1
    dbm_instance['query_activity']['collection_interval'] = interval
    dbm_instance['query_activity']['run_sync'] = False
    check = MySql(CHECK_NAME, {}, [dbm_instance])
    dd_run_check(check)
    wait_seconds = 1
    time.sleep(wait_seconds)
    # Upper bound on how many collections fit in the observation window.
    max_collections = int(1 / interval * wait_seconds) + 1
    check.cancel()
    payload_metrics = aggregator.metrics("dd.mysql.activity.collect_activity.payload_size")
    # Allow generous slack on the low end for scheduling jitter.
    assert max_collections / 2.0 <= len(payload_metrics) <= max_collections
# Example #20
def test_custom_queries(aggregator, instance_custom_queries, dd_run_check):
    """Custom queries defined on the instance should produce their metrics."""
    check = MySql(common.CHECK_NAME, {}, instances=[instance_custom_queries])
    dd_run_check(check)

    aggregator.assert_metric('alice.age', value=25, tags=tags.METRIC_TAGS)
    aggregator.assert_metric('bob.age', value=20, tags=tags.METRIC_TAGS)
# Example #21
def test_only_custom_queries(aggregator, dd_run_check, instance_custom_queries):
    """With `only_custom_queries` enabled, no standard metric sets may be emitted."""
    instance_custom_queries['only_custom_queries'] = True
    check = MySql(common.CHECK_NAME, {}, [instance_custom_queries])
    dd_run_check(check)

    # None of the built-in metric families should appear.
    for metric_set in (
        STATUS_VARS,
        VARIABLES_VARS,
        INNODB_VARS,
        BINLOG_VARS,
        OPTIONAL_STATUS_VARS,
        OPTIONAL_STATUS_VARS_5_6_6,
        GALERA_VARS,
        PERFORMANCE_VARS,
        SCHEMA_VARS,
        SYNTHETIC_VARS,
        REPLICA_VARS,
        GROUP_REPLICATION_VARS,
    ):
        for metric_def in metric_set.values():
            aggregator.assert_metric(metric_def[0], count=0)

    # Internal check metrics are still allowed even if only_custom_queries is enabled
    internal_metrics = [name for name in aggregator.metric_names if name.startswith('dd.')]
    for internal_metric in internal_metrics:
        aggregator.assert_metric(internal_metric, at_least=0)

    aggregator.assert_metric('alice.age', value=25, tags=tags.METRIC_TAGS)
    aggregator.assert_metric('bob.age', value=20, tags=tags.METRIC_TAGS)
    aggregator.assert_all_metrics_covered()
# Example #22
def test_replication_check_status(slave_io_running, slave_sql_running,
                                  check_status, instance_basic, aggregator):
    """Replica IO/SQL thread states should map to the expected service check status."""
    mysql_check = MySql(common.CHECK_NAME, {}, instances=[instance_basic])
    mysql_check.service_check_tags = ['foo:bar']

    # Replication status payload as the check would read it from the server.
    replication_results = {
        'Slaves_connected': 1,
        'Binlog_enabled': True,
        'Slave_IO_Running': slave_io_running,
        'Slave_SQL_Running': slave_sql_running,
    }
    mysql_check._check_replication_status(replication_results)

    aggregator.assert_service_check(
        'mysql.replication.slave_running', check_status, tags=['foo:bar'], count=1
    )
# Example #23
def test_statement_samples_rate_limit(aggregator, bob_conn, dbm_instance):
    """The per-query sample rate limit should deduplicate repeated statements."""
    dbm_instance['statement_samples']['collections_per_second'] = 0.5
    query = "select name as nam from testdb.users where name = 'hello'"
    mysql_check = MySql(common.CHECK_NAME, {}, instances=[dbm_instance])

    # Run the same query repeatedly across several check runs.
    with closing(bob_conn.cursor()) as cursor:
        for _ in range(5):
            cursor.execute(query)
            mysql_check.check(dbm_instance)
            time.sleep(1)

    collected = mysql_check._statement_samples._statement_samples_client.get_events()
    matching = [event for event in collected if event['db']['statement'] == query]
    assert len(matching) == 1, "should have collected exactly one event due to sample rate limit"

    # The collection loop itself is rate limited too.
    timing_metrics = aggregator.metrics("dd.mysql.collect_statement_samples.time")
    assert 2 < len(timing_metrics) < 6
# Example #24
def test_innodb_status_unicode_error(caplog):
    """A UnicodeDecodeError from the INNODB status query is logged and yields empty stats."""
    mysql_check = MySql(common.CHECK_NAME, {}, instances=[{}])

    class MockCursor:
        def execute(self, command):
            raise UnicodeDecodeError('encoding', b'object', 0, 1, command)

        def close(self):
            return MockCursor()

    class MockDatabase:
        def cursor(self):
            return MockCursor()

    # BUG FIX: `caplog.at_level(...)` returns a context manager; calling it
    # without `with` is a no-op. Use `set_level` so the capture level applies.
    caplog.set_level(logging.WARNING)
    assert mysql_check._get_stats_from_innodb_status(MockDatabase()) == {}
    assert 'Unicode error while getting INNODB status' in caplog.text
# Example #25
def test_connection_with_sock():
    """A `sock` option should yield unix-socket connection args, dropping host/port TCP use.

    NOTE(review): the expected `passwd` previously disagreed with the
    instance's `pass` value; the assertion now mirrors the configured value.
    """
    file_instance = {
        'host': 'localhost',
        'port': '123',
        'user': '******',
        'pass': '******',
        'sock': '/foo/bar',
    }
    check = MySql(common.CHECK_NAME, {}, [file_instance])
    connection_args = check._get_connection_args()
    # `pass` from the instance is forwarded as pymysql's `passwd`; the port is
    # coerced to an int and the socket path mapped to `unix_socket`.
    assert connection_args == {
        'ssl': None,
        'connect_timeout': 10,
        'unix_socket': '/foo/bar',
        'user': '******',
        'passwd': '******',
        'port': 123,
    }
def test_get_estimated_row_size_bytes(dbm_instance, file):
    """The row-size estimate should be within 10% of the actual JSON payload size."""
    check = MySql(CHECK_NAME, {}, [dbm_instance])
    activity_rows = _load_test_activity_json(file)
    actual_size = len(
        json.dumps(activity_rows, default=check._query_activity._json_event_encoding)
    )
    estimated_size = sum(
        check._query_activity._get_estimated_row_size_bytes(row) for row in activity_rows
    )
    # Relative error of the estimator must stay within 10%.
    assert abs((actual_size - estimated_size) / float(actual_size)) <= 0.10
def test_truncate_on_max_size_bytes(dbm_instance, datadog_agent, rows,
                                    expected_len, expected_users):
    """Rows beyond the payload size limit are truncated, keeping expected users in order."""
    check = MySql(CHECK_NAME, {}, [dbm_instance])
    with mock.patch.object(datadog_agent, 'obfuscate_sql',
                           passthrough=True) as mock_agent:
        # NOTE(review): a string side_effect makes the mock return successive
        # characters per call, not the whole string — confirm this is intended.
        mock_agent.side_effect = "something"
        result_rows = check._query_activity._normalize_rows(rows)
        # Truncation must keep exactly `expected_len` rows...
        assert len(result_rows) == expected_len
        # ...and preserve the expected users in their original order.
        for index, user in enumerate(expected_users):
            assert result_rows[index]['processlist_user'] == user
def test_async_job_inactive_stop(aggregator, dd_run_check, dbm_instance):
    """The async activity job should stop on its own when the check goes inactive."""
    dbm_instance['query_activity']['run_sync'] = False
    check = MySql(CHECK_NAME, {}, [dbm_instance])
    dd_run_check(check)
    # Resolving the future waits for the loop to exit and surfaces exceptions.
    check._query_activity._job_loop_future.result()
    aggregator.assert_metric(
        "dd.mysql.async_job.inactive_stop",
        tags=_expected_dbm_instance_tags(dbm_instance),
        hostname='',
    )
def test_activity_metadata(aggregator, dd_run_check, dbm_instance,
                           datadog_agent, metadata, expected_metadata_payload):
    """Obfuscator-provided metadata (tables/commands/comments) must flow into activity events.

    Sets up a real lock contention scenario: bob holds a row lock, fred's
    query blocks on it, and the check snapshots the resulting activity.
    """
    check = MySql(CHECK_NAME, {}, [dbm_instance])

    query = """
    -- Test comment
    SELECT id, name FROM testdb.users FOR UPDATE
    """
    # Precomputed signature of `query`, used to find it among collected events.
    query_signature = 'e7f7cb251194df29'

    def _run_test_query(conn, _query):
        conn.cursor().execute(_query)

    def _run_blocking(conn):
        # Open a transaction and take the row lock without committing.
        conn.begin()
        conn.cursor().execute("SELECT id FROM testdb.users FOR UPDATE")

    def _obfuscate_sql(_query, options=None):
        # Mimic the agent obfuscator's JSON output, injecting test metadata.
        return json.dumps({'query': _query, 'metadata': metadata})

    def _run_query_with_mock_obfuscator(conn, _query):
        # Execute the query with the mocked obfuscate_sql. The result should produce an event payload with the metadata.
        with mock.patch.object(datadog_agent,
                               'obfuscate_sql',
                               passthrough=True) as mock_agent:
            mock_agent.side_effect = _obfuscate_sql
            _run_test_query(conn, _query)

    bob_conn = _get_conn_for_user('bob')
    fred_conn = _get_conn_for_user('fred')

    executor = concurrent.futures.ThreadPoolExecutor(1)
    # bob's query blocks until the tx is completed
    executor.submit(_run_blocking, bob_conn)
    # fred's query will get blocked by bob, so it needs to be run asynchronously
    executor.submit(_run_query_with_mock_obfuscator, fred_conn, query)

    dd_run_check(check)
    # Release bob's lock so fred's query (and the executor) can finish.
    bob_conn.commit()
    bob_conn.close()
    fred_conn.close()
    executor.shutdown()

    dbm_activity = aggregator.get_event_platform_events("dbm-activity")
    assert dbm_activity, "should have collected at least one activity"
    # Locate the event for our specific query by its signature.
    matching_activity = []
    for event in dbm_activity:
        for activity in event['mysql_activity']:
            if activity.get('query_signature') == query_signature:
                matching_activity.append(activity)
    assert len(matching_activity) == 1
    activity = matching_activity[0]
    # The metadata injected by the mocked obfuscator must surface on the event.
    assert activity['dd_tables'] == expected_metadata_payload['tables']
    assert activity['dd_commands'] == expected_metadata_payload['commands']
    assert activity['dd_comments'] == expected_metadata_payload['comments']
def test_activity_reported_hostname(aggregator, dbm_instance, dd_run_check,
                                    reported_hostname, expected_hostname):
    """Activity events should carry the configured `reported_hostname`."""
    dbm_instance['reported_hostname'] = reported_hostname
    check = MySql(CHECK_NAME, {}, [dbm_instance])

    # Run twice so at least one activity snapshot is guaranteed.
    dd_run_check(check)
    dd_run_check(check)

    events = aggregator.get_event_platform_events("dbm-activity")
    assert events, "should have at least one activity sample"
    assert events[0]['host'] == expected_hostname