Example #1
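The snippets below appear to be unit tests for zmon-worker's MainTask (the module path 'zmon_worker_monitor.zmon_worker.tasks.main' is visible in two monkeypatch calls). They share imports and constants that are not shown; a minimal sketch of that preamble, where every path other than the one visible in the monkeypatch calls is an assumption, could look like:

# Shared test imports (sketch). Only zmon_worker_monitor.zmon_worker.tasks.main
# is confirmed by the snippets themselves; the other paths are best guesses.
import json
import time

import pytest

from mock import MagicMock  # unittest.mock.MagicMock on Python 3

from zmon_worker_monitor import plugin_manager
from zmon_worker_monitor.zmon_worker.tasks.main import MainTask

ONE_DAY = 24 * 3600  # seconds; used for downtime start/end timestamps

# `reload` is used as the Python 2 builtin here; on Python 3 it would come from
# importlib. ResultSizeError is raised on oversized check results; its import
# path is not shown in the snippets.
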
def test_evaluate_downtimes(monkeypatch):
    downtimes = [{'id': 'dt-active', 'start_time': 0, 'end_time': time.time() + ONE_DAY},
                 {'id': 'dt-expired', 'start_time': 0, 'end_time': 2},
                 {'id': 'dt-future', 'start_time': time.time() + ONE_DAY, 'end_time': time.time() + (2 * ONE_DAY)}]
    downtimes_active = downtimes[:1]  # only the first one is still active

    # mock Redis
    con = MagicMock()
    con.pipeline.return_value.execute.return_value = (['ent1'], {'dt-active': json.dumps(downtimes[0]),
                                                                 'dt-expired': json.dumps(downtimes[1]),
                                                                 'dt-future': json.dumps(downtimes[2])})
    monkeypatch.setattr(MainTask, 'con', con)
    MainTask.configure({})
    task = MainTask()
    result = task._evaluate_downtimes(1, 'ent1')
    assert downtimes_active == result

def test_main_task_configure_tags(monkeypatch, tags, result):
    reload(plugin_manager)
    plugin_manager.init_plugin_manager()  # init plugin manager

    MainTask.configure({'zmon.entity.tags': tags})
    task = MainTask()

    assert task._entity_tags == result
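test_main_task_configure_tags (like the other tests that take extra arguments such as sampling_config, check_id, result, or expected) relies on pytest parametrization to supply those values; the decorators are not part of the snippets. A hypothetical decorator that would sit directly above the function definition shown above might look like this, with value pairs that are purely illustrative and assume 'zmon.entity.tags' is parsed into a list:

# Hypothetical parametrization: the argument names match the test signature,
# but the value pairs are illustrative, not taken from the real test suite.
@pytest.mark.parametrize('tags,result', [
    (None, []),                        # assumed: no 'zmon.entity.tags' configured
    (['stack_name'], ['stack_name']),  # assumed: a tag list is passed through as-is
])
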
Example #3
def test_main_task_sampling(monkeypatch, sampling_config, check_id, interval, is_alert, is_changed, is_sampled):
    reload(plugin_manager)
    plugin_manager.init_plugin_manager()  # init plugin manager

    span = MagicMock()

    MainTask.configure({'account': '123'})
    task = MainTask()

    assert task.is_sampled(sampling_config, check_id, interval, is_alert, is_changed, span) is is_sampled
def test_check(monkeypatch):
    reload(plugin_manager)
    plugin_manager.init_plugin_manager()  # init plugin manager

    MainTask.configure({})
    task = MainTask()
    monkeypatch.setattr(task, '_get_check_result', MagicMock())
    monkeypatch.setattr(task, '_store_check_result', MagicMock())
    monkeypatch.setattr(task, 'send_metrics', MagicMock())
    req = {'check_id': 123, 'entity': {'id': 'myent'}}
    task.check(req)
Example #6
def test_store_kairosdb(monkeypatch, result, expected):
    post = MagicMock()
    monkeypatch.setattr('requests.post', post)
    MainTask.configure({'kairosdb.enabled': True, 'kairosdb.host': 'example.org', 'kairosdb.port': 8080})
    task = MainTask()
    task._store_check_result_to_kairosdb({'check_id': 123,
                                          'entity': {'id': '77', 'type': 'test'}}, result)
    args, kwargs = post.call_args
    assert args[0] == 'http://example.org:8080/api/v1/datapoints'
    # decode JSON again to make the test stable (to not rely on dict key order)
    assert expected == json.loads(args[1])
    assert kwargs == {'timeout': 2}
def test_send_to_dataservice(monkeypatch):
    check_results = [{'check_id': 123, 'ts': 10, 'value': 'CHECK-VAL'}]
    expected = {'account': 'myacc', 'team': 'myteam', 'region': 'eu-west-1', 'results': check_results}

    put = MagicMock()
    monkeypatch.setattr('requests.put', put)
    monkeypatch.setattr('tokens.get', lambda x: 'mytok')

    MainTask.configure({'account': expected['account'], 'team': expected['team'], 'region': expected['region'],
                        'dataservice.url': 'https://example.org', 'dataservice.oauth2': True})
    MainTask.send_to_dataservice(check_results)
    args, kwargs = put.call_args
    assert args[0] == 'https://example.org/api/v2/data/myacc/123/eu-west-1'
    assert expected == json.loads(kwargs['data'])
Example #9
def test_main_task_sampling_rate(monkeypatch, sampling_config, check_id, is_alert, is_changed):
    reload(plugin_manager)
    plugin_manager.init_plugin_manager()  # init plugin manager

    span = MagicMock()

    MainTask.configure({'account': '123'})
    task = MainTask()

    results = [task.is_sampled(sampling_config, check_id, 60, is_alert, is_changed, span) for _ in range(100)]
    sampled = len([s for s in results if s])

    # We give some margin of error due to probabilistic non-uniform sampling
    assert 5 <= sampled <= 20
Example #12
def test_evaluate_alert(monkeypatch):
    reload(plugin_manager)
    plugin_manager.init_plugin_manager()  # init plugin manager
    plugin_manager.collect_plugins()

    # mock Redis
    con = MagicMock()
    monkeypatch.setattr(MainTask, 'con', con)
    MainTask.configure({})
    task = MainTask()
    alert_def = {'id': 1, 'check_id': 123, 'condition': '>0', 'parameters': {'p1': {'value': 'x'}}}
    req = {'check_id': 123,
           'entity': {'id': '77', 'type': 'test'}}
    result = {'ts': 10, 'value': 0}
    is_alert, captures = task.evaluate_alert(alert_def, req, result)
    assert {'p1': 'x'} == captures
    assert not is_alert

    # change value over threshold
    result = {'ts': 10, 'value': 1}
    is_alert, captures = task.evaluate_alert(alert_def, req, result)
    assert {'p1': 'x'} == captures
    assert is_alert

    # produce exception
    alert_def['condition'] = 'value["missing-key"] > 0'
    is_alert, captures = task.evaluate_alert(alert_def, req, result)
    assert 'p1' in captures and captures.get('p1') == 'x'
    assert 'exception' in captures and "'int' object has no attribute '__getitem__'" in captures.get('exception')
    assert is_alert
Example #13
def test_notify(monkeypatch):
    reload(plugin_manager)
    plugin_manager.init_plugin_manager()  # init plugin manager
    plugin_manager.collect_plugins()

    # mock Redis
    con = MagicMock()
    monkeypatch.setattr(MainTask, 'con', con)
    monkeypatch.setattr(MainTask, '_evaluate_downtimes', lambda self, x, y: [])
    MainTask.configure({})
    task = MainTask()
    alert_def = {'id': 42, 'check_id': 123, 'condition': '>0', 'parameters': {'p1': {'value': 'x'}}}
    req = {'check_id': 123,
           'check_name': 'My Check',
           'entity': {'id': '77', 'type': 'test'}}
    result = {'ts': 10, 'value': 0}
    notify_result = task.notify(result, req, [alert_def])
    assert [] == notify_result

    # 1 > 0 => trigger active alert!
    result = {'ts': 10, 'value': 1}
    notify_result = task.notify(result, req, [alert_def])
    assert [alert_def['id']] == notify_result

    # alert is not in time period
    alert_def['period'] = 'year {1980}'
    result = {'ts': 10, 'value': 1}
    notify_result = task.notify(result, req, [alert_def])
    assert [] == notify_result
Example #15
def test_check_trial_run_result_size_violation(monkeypatch, fx_big_result):
    config, result = fx_big_result

    reload(plugin_manager)
    plugin_manager.init_plugin_manager()  # init plugin manager

    monkeypatch.setattr('zmon_worker_monitor.zmon_worker.tasks.main.MAX_RESULT_SIZE', 2)  # Lower default limit to 2K

    MainTask.configure(config)
    task = MainTask()

    get_result = MagicMock()
    get_result.return_value = {'value': result}

    monkeypatch.setattr(task, '_get_check_result_internal', get_result)
    monkeypatch.setattr(task, '_store_check_result', MagicMock())
    monkeypatch.setattr(task, 'send_metrics', MagicMock())

    req = {'check_id': 123, 'entity': {'id': 'myent'}}

    with pytest.raises(ResultSizeError):
        task.check_for_trial_run(req)
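fx_big_result is a fixture that is not shown in these snippets. Based on how it is used (it must yield a worker config plus a check value whose serialized size exceeds the lowered 2 KB MAX_RESULT_SIZE), a minimal hypothetical version could be:

# Hypothetical fixture sketch; the real fixture in the test suite may differ.
@pytest.fixture
def fx_big_result():
    config = {}                 # worker configuration passed to MainTask.configure()
    result = 'x' * (10 * 1024)  # a value comfortably above the lowered 2 KB limit
    return config, result
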
Example #20
def test_check_result_size_violation(monkeypatch, fx_big_result):
    config, result = fx_big_result

    reload(plugin_manager)
    plugin_manager.init_plugin_manager()  # init plugin manager

    monkeypatch.setattr('zmon_worker_monitor.zmon_worker.tasks.main.MAX_RESULT_SIZE', 2)  # Lower default limit to 2K

    MainTask.configure(config)
    task = MainTask()

    get_result = MagicMock()
    get_result.return_value = {'value': result}

    monkeypatch.setattr(task, '_get_check_result_internal', get_result)
    monkeypatch.setattr(task, '_store_check_result', MagicMock())
    monkeypatch.setattr(task, 'send_metrics', MagicMock())

    req = {'check_id': 123, 'entity': {'id': 'myent'}}

    with pytest.raises(ResultSizeError):
        task.check(req)