def test_next_network_scan_at(allowed, last_end, next_time):
    folder = watolib.Folder(
        name="bla",
        title="Bla",
        attributes={
            "network_scan": {
                'exclude_ranges': [],
                'ip_ranges': [('ip_range', ('10.3.1.1', '10.3.1.100'))],
                'run_as': u'cmkadmin',
                'scan_interval': 300,
                'set_ipaddress': True,
                'tag_criticality': 'offline',
                'time_allowed': allowed,
            },
            "network_scan_result": {
                "end": last_end,
            },
        })

    with on_time("2018-01-10 02:00:00", "CET"):
        assert folder.next_network_scan_at() == next_time


def test_calculate_data_for_prediction(cfg_setup, utcdate, timezone, params):
    period_info = prediction._PREDICTION_PERIODS[params['period']]
    with on_time(utcdate, timezone):
        now = int(time.time())
        assert callable(period_info.groupby)
        timegroup = period_info.groupby(now)[0]

        time_windows = prediction._time_slices(now, int(params["horizon"] * 86400), period_info,
                                               timegroup)

    hostname, service_description, dsname = 'test-prediction', "CPU load", 'load15'
    rrd_datacolumn = cmk.utils.prediction.rrd_datacolum(hostname, service_description, dsname,
                                                        "MAX")
    data_for_pred = prediction._calculate_data_for_prediction(time_windows, rrd_datacolumn)

    expected_reference = _load_expected_result("%s/tests/integration/cmk/base/test-files/%s/%s" %
                                               (repo_path(), timezone, timegroup))
    assert isinstance(expected_reference, dict)
    assert sorted(data_for_pred) == sorted(expected_reference)
    for key in data_for_pred:
        if key == "points":
            for cal, ref in zip(data_for_pred['points'], expected_reference['points']):
                assert cal == pytest.approx(ref, rel=1e-12, abs=1e-12)
        else:
            # TypedDict key must be a string literal
            assert data_for_pred[key] == expected_reference[key]  # type: ignore[misc]


def test_cleanup_user_profiles_remove_abandoned(user_id):
    (profile := Path(config.config_dir, "profile")).mkdir()
    (bla := profile / "bla.mk").touch()
    with on_time('2018-04-15 16:50', 'CET'):
        os.utime(bla, (time.time(), time.time()))
    userdb.UserProfileCleanupBackgroundJob()._do_cleanup()
    assert not profile.exists()


def test_log_audit_with_object_diff():
    old = {
        "a": "b",
        "b": "c",
    }
    new = {
        "b": "c",
    }

    with on_time('2018-04-15 16:50', 'CET'):
        log_audit(
            object_ref=None,
            action="bla",
            message="Message",
            user_id=UserId("calvin"),
            diff_text=make_diff_text(old, new),
        )

    store = AuditLogStore(AuditLogStore.make_path())
    assert store.read() == [
        AuditLogStore.Entry(
            time=1523811000,
            object_ref=None,
            user_id='calvin',
            action='bla',
            text='Message',
            diff_text='Attribute "a" with value "b" removed.',
        ),
    ]


def test_calculate_data_for_prediction(cfg_setup, utcdate, timezone, params):
    period_info = prediction.prediction_periods[params['period']]
    with on_time(utcdate, timezone):
        now = int(time.time())
        groupby = period_info["groupby"]
        assert callable(groupby)
        timegroup = groupby(now)[0]

        time_windows = prediction.time_slices(now, int(params["horizon"] * 86400), period_info,
                                              timegroup)

    hostname, service_description, dsname = 'test-prediction', "CPU load", 'load15'
    rrd_datacolumn = cmk.utils.prediction.rrd_datacolum(hostname, service_description, dsname,
                                                        "MAX")
    data_for_pred = prediction.calculate_data_for_prediction(time_windows, rrd_datacolumn)

    path = "%s/tests-py3/integration/cmk/base/test-files/%s/%s" % (repo_path(), timezone,
                                                                   timegroup)
    reference = cmk.utils.prediction.retrieve_data_for_prediction(path, timegroup)
    data_points = data_for_pred.pop('points')
    assert reference is not None
    ref_points = reference.pop('points')
    for cal, ref in zip(data_points, ref_points):
        assert cal == pytest.approx(ref, rel=1e-12, abs=1e-12)
    assert data_for_pred == reference


def test_uptime_check_zero():
    with on_time('2018-04-15 16:50', 'CET'):
        assert list(uptime_utils.check({}, uptime_utils.Section(0, None))) == [
            Result(state=State.OK, summary='Up since Apr 15 2018 18:50:00'),
            Result(state=State.OK, summary='Uptime: 0 seconds'),
            Metric("uptime", 0.0),
        ]


def test_filters_filter_table(register_builtin_html, test, monkeypatch):
    # Needed for DeploymentTristateFilter test
    def deployment_states(host_name):
        return {
            "abc": {
                "target_aghash": "abc",
            },
            "zzz": {},
        }[host_name]

    if not cmk_version.is_raw_edition():
        import cmk.gui.cee.agent_bakery as agent_bakery  # pylint: disable=redefined-outer-name,import-outside-toplevel,no-name-in-module
        monkeypatch.setattr(agent_bakery, "get_cached_deployment_status", deployment_states)

    # Needed for FilterInvFloat test
    monkeypatch.setattr(cmk.gui.inventory, "get_inventory_data", get_inventory_data_patch)

    # Needed for FilterAggrServiceUsed test
    def is_part_of_aggregation_patch(what, site, host, service):
        return {("s", "h", "srv1"): True}.get((site, host, service), False)

    monkeypatch.setattr(cmk.gui.bi, "is_part_of_aggregation", is_part_of_aggregation_patch)

    with html.stashed_vars(), on_time('2018-04-15 16:50', 'CET'):
        html.request.del_vars()
        for key, val in test.request_vars:
            html.request.set_var(key, val)

        # TODO: Fix this for real...
        if not cmk_version.is_raw_edition() or test.ident != "deployment_has_agent":
            filt = cmk.gui.plugins.visuals.utils.filter_registry[test.ident]()
            assert filt.filter_table(test.rows) == test.expected_rows


def fixture_mock_time():
    """Use this fixture for simple time + zone mocking

    Use this fixture instead of directly invoking on_time in case you don't need a
    specific time. Calling this once instead of on_time() a lot of times saves
    execution time.
    """
    with on_time(1572247138, "CET"):
        yield


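# A minimal usage sketch (hypothetical test; the fixture name "mock_time" is
# assumed from the fixture_* naming convention above): a test that only needs
# *some* frozen clock depends on the fixture instead of opening its own
# on_time() context.
def test_example_uses_mocked_clock(mock_time):
    # 1572247138 is the epoch instant frozen by fixture_mock_time.
    assert int(time.time()) == 1572247138

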
def test_check(value_store_patch, section, params, expected):
    with on_time(*NOW_SIMULATED):
        with value_store.context(CheckPluginName("oracle_asm_diskgroup"), None):
            yielded_results = list(
                oracle_asm_diskgroup.check_oracle_asm_diskgroup(ITEM, params, section))
            assert yielded_results == expected


def test_tuple_value_to_json_conversion(value, result):
    with on_time("2020-03-02", "UTC"):
        date_tuple = vs.Tuple([vs.AbsoluteDate(), vs.AbsoluteDate()])
        assert date_tuple.value_to_text(value) == result
        json_value = date_tuple.value_to_json(value)
        assert date_tuple.value_from_json(json_value) == value


def test_get_annotation_date_render_function(annotation_times, result):
    annotations = [((None, None, None), {
        "from": s,
        "until": e,
    }) for s, e in annotation_times]
    with on_time(1572253746, "CET"):
        assert availability.get_annotation_date_render_function(
            annotations, {"range": ((1543446000, 1543446000 + 86399), "bla")}) == result


def test_uptime_check_basic():
    with on_time('2018-04-15 16:50', 'CET'):
        assert list(uptime_utils.check({}, uptime_utils.Section(123, None))) == [
            Result(state=State.OK, summary='Up since Apr 15 2018 18:47:57'),
            Result(state=State.OK, summary='Uptime: 2 minutes 3 seconds'),
            Metric("uptime", 123.0),
        ]


def test_check_ps_common(check_manager, monkeypatch, inv_item, reference):
    check = check_manager.get_check("ps")
    parsed = sum([check.context['parse_ps'](info)[1] for info in generate_inputs()], [])
    total_ram = 1024**3 if "emacs" in inv_item[0] else None

    with on_time(1540375342, "CET"):
        factory_defaults = {"levels": (1, 1, 99999, 99999)}
        factory_defaults.update(inv_item[1])
        test_result = CheckResult(check.context["check_ps_common"](inv_item[0], factory_defaults,
                                                                   parsed, total_ram=total_ram))
        assertCheckResultsEqual(test_result, reference)


def run_check_ps_common_with_elapsed_time(check_time, cputime):
    with on_time(check_time, "CET"):
        agent_info = """(on,2275004,434008,00:00:49/26:58,25576) firefox
(on,1869920,359836,00:01:23/6:57,25664) firefox
(on,7962644,229660,00:00:10/26:56,25758) firefox
(on,1523536,83064,00:{:02}:00/26:55,25898) firefox"""
        # `check`, `params` and `cpu_cores` are taken from the enclosing scope.
        parsed = check.context['parse_ps'](splitter(agent_info.format(cputime)))[1]
        return CheckResult(check.context["check_ps_common"]('firefox', params, parsed,
                                                            cpu_cores=cpu_cores))


def test_open_log(tmp_path):
    log_file = tmp_path / "test.log"
    log.open_log(log_file)

    with on_time('2018-04-15 16:50', 'CET'):
        log.logger.warning("abc")
        log.logger.warning("äbc")

    with log_file.open("rb") as f:
        assert f.read() == (b"2018-04-15 18:50:00,000 [30] [cmk] abc\n"
                            b"2018-04-15 18:50:00,000 [30] [cmk] \xc3\xa4bc\n")


def test_timerange_value_to_json_conversion():
    with on_time("2020-03-02", "UTC"):
        for choice in vs.Timerange().choices():
            if choice[0] == "age":
                choice = (("age", 12345), "The last..., 3 hours 25 minutes 45 seconds", None)
            elif choice[0] == "date":
                choice = (("date", (1582671600.0, 1582844400.0)),
                          "Date range, 2020-02-25, 2020-02-27", None)
            assert vs.Timerange().value_to_text(choice[0]) == choice[1]
            json_value = vs.Timerange().value_to_json(choice[0])
            assert vs.Timerange().value_from_json(json_value) == choice[0]


def test_uptime_solaris_inputs(check_manager, info, reference):
    check = check_manager.get_check("uptime")
    parsed = check.run_parse(info)

    # This time freeze has no correlation with the uptime of the test. It
    # is needed for the check output to always return the same infotext.
    # The true test happens on state and perfdata.
    with on_time('2018-04-15 16:50', 'CET'):
        result = CheckResult(check.run_check(None, {}, parsed))

    assertCheckResultsEqual(result, CheckResult(reference))


def test_cleanup_user_profiles_keep_active_profile_old(user_id):
    profile_dir = Path(config.config_dir, user_id)
    assert profile_dir.exists()

    with on_time('2018-04-15 16:50', 'CET'):
        for file_path in profile_dir.glob("*.mk"):
            os.utime(file_path, (time.time(), time.time()))

    userdb.UserProfileCleanupBackgroundJob()._do_cleanup()
    assert Path(config.config_dir, user_id).exists()


def test_uptime_solaris_inputs(info, reference):
    section = uptime.parse_uptime(info)
    assert section is not None

    # This time freeze has no correlation with the uptime of the test. It
    # is needed for the check output to always return the same infotext.
    # The true test happens on state and perfdata.
    with on_time('2018-04-15 16:50', 'CET'):
        result = list(uptime_utils.check(Parameters({}), section))

    assert result == reference


def test_process_job_stats(
    job_data,
    age_levels,
    exit_code_to_state_map,
    expected_results,
):
    with on_time(*TIME):
        assert list(job._process_job_stats(
            job_data,
            age_levels,
            exit_code_to_state_map,
        )) == list(expected_results)


def test_node_timestamps_non_utc():
    node_names = ['node1', 'node2', 'node3']
    stat_time_formatted = [
        "2019-03-01T10:44:58.19881199+01:00",
        "2019-03-01T10:44:55.383089539+01:00",
        "2019-03-01T10:44:51.42243614+01:00",
    ]
    with on_time(1572253746, "CET"):
        stats = cluster_stats(node_names, stat_time_formatted)
    utc_timestamp_average = 1551429894.7
    assert stats['timestamp'] == pytest.approx(utc_timestamp_average), \
        "The timestamp of a cluster has to be the average timestamp of its nodes"


def test_refresh_session_success(user_id, session_valid):
    session_infos = userdb._load_session_infos(user_id)
    assert session_infos
    old_session = session_infos[session_valid]
    with on_time("2019-09-05 00:00:30", "UTC"):
        userdb._refresh_session(user_id, session_valid)

    new_session_infos = userdb._load_session_infos(user_id)

    new_session = new_session_infos[session_valid]
    assert old_session.session_id == new_session.session_id
    assert new_session.last_activity > old_session.last_activity


def test_get_rrd_data(cfg_setup, utcdate, timezone, period, result):
    with on_time(utcdate, timezone):
        timestamp = time.time()
        _, from_time, until_time, _ = prediction.get_prediction_timegroup(
            int(timestamp), prediction.prediction_periods[period])

    timeseries = cmk.utils.prediction.get_rrd_data('test-prediction', 'CPU load', 'load15', 'MAX',
                                                   from_time, until_time)

    assert timeseries.start <= from_time
    assert timeseries.end >= until_time
    assert (timeseries.step, len(timeseries.values)) == result


def test_node_timestamps_utc():
    node_names = ['node1', 'node2', 'node3']
    stat_time_formatted = [
        '2019-02-15T13:53:27.825541873Z',
        '2019-02-15T13:53:29.796754852Z',
        '2019-02-15T13:53:20.663979637Z',
    ]
    with on_time(1572253746, "CET"):
        stats = cluster_stats(node_names, stat_time_formatted)
    utc_timestamp_average = 1550235205.3
    assert stats['timestamp'] == pytest.approx(utc_timestamp_average), \
        "The timestamp of a cluster has to be the average timestamp of its nodes"


def test_filters_filter(register_builtin_html, test, monkeypatch):
    # Needed for ABCFilterCustomAttribute
    monkeypatch.setattr(cmk.gui.config, "wato_host_attrs", [{"name": "bla", "title": "Bla"}])

    # Needed for ABCTagFilter
    monkeypatch.setattr(cmk.gui.config, "tags", cmk.utils.tags.BuiltinTagConfig())

    with html.stashed_vars(), on_time('2018-04-15 16:50', 'CET'):
        html.request.del_vars()
        for key, val in test.request_vars:
            html.request.set_var(key, val)

        filt = cmk.gui.plugins.visuals.utils.filter_registry[test.ident]()
        assert filt.filter(infoname="bla") == test.expected_filters


def test_time_slices(utcdate, timezone, horizon, period_info, timegroup, result):
    """Find period slices for predictive levels

    More than a test, this is an exemplification of our convention:
    predictive levels work on local times, because they are linked to human routines.
    """
    with on_time(utcdate, timezone):
        timestamp = time.time()
        print(timestamp)
        slices = prediction._time_slices(int(timestamp), horizon, period_info, timegroup)
        pprint([('ontz', x, time.ctime(x), time.ctime(y)) for x, y in slices])

    # Outside the mocked zone, ctime() renders the same slices in the system
    # timezone, making the local-time convention visible when debugging.
    pprint([('sys', x, time.ctime(x), time.ctime(y)) for x, y in slices])
    assert slices == result


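# A minimal illustration (hypothetical test, not from the suite) of the
# on_time() semantics the tests above rely on: the date string is interpreted
# as UTC, while wall-clock rendering follows the mocked zone. The epoch value
# and the 18:50 local rendering are taken from the audit-log and logging tests
# in this file.
def test_example_on_time_semantics():
    with on_time('2018-04-15 16:50', 'CET'):
        # 2018-04-15 16:50 UTC is this epoch instant ...
        assert int(time.time()) == 1523811000
        # ... and renders as 18:50 local time (CEST, UTC+2 in mid-April).
        assert time.strftime("%H:%M", time.localtime()) == "18:50"

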
def test_timerange_value_to_json_conversion():
    with on_time("2020-03-02", "UTC"):
        for ident, title, _vs in vs.Timerange().choices():
            choice_value: vs.CascadingDropdownChoiceValue = ident
            if ident == "age":
                choice_value = ("age", 12345)
                title = "The last..., 3 hours 25 minutes 45 seconds"
            elif ident == "date":
                choice_value = ("date", (1582671600.0, 1582844400.0))
                title = "Date range, 2020-02-25, 2020-02-27"

            assert vs.Timerange().value_to_text(choice_value) == title
            json_value = vs.Timerange().value_to_json(choice_value)
            assert vs.Timerange().value_from_json(json_value) == choice_value


def time_info(service, agent_info, check_time, cputime, cpu_cores):
    with on_time(datetime.datetime.utcfromtimestamp(check_time), "CET"):
        _cpu_info, parsed_lines = ps_section.parse_ps(splitter(agent_info.format(cputime)))
        lines_with_node_name: List[Tuple[Optional[str], ps_utils.ps_info, List[str]]] = [
            (None, ps_info, cmd_line) for (ps_info, cmd_line) in parsed_lines
        ]

        return list(ps_utils.check_ps_common(
            label="Processes",
            item=service.item,
            params=service.parameters,  # type: ignore[arg-type]
            process_lines=lines_with_node_name,
            cpu_cores=cpu_cores,
            total_ram=None,
        ))


def test_refresh_session_success(user_id, session_valid):
    session_infos = userdb._load_session_infos(user_id)
    assert session_infos
    old_session = userdb.SessionInfo(**asdict(session_infos[session_valid]))
    with on_time("2019-09-05 00:00:30", "UTC"):
        userdb._set_session(user_id, session_infos[session_valid])
        userdb._refresh_session(user_id, session_infos[session_valid])
        userdb.on_end_of_request(user_id)

    new_session_infos = userdb._load_session_infos(user_id)

    new_session = new_session_infos[session_valid]
    assert old_session.session_id == new_session.session_id
    assert new_session.last_activity > old_session.last_activity


def test_dictionary_value_to_json_conversion(value, result):
    with on_time("2020-03-02", "UTC"):
        # TODO: Obtain this valuespec directly by importing AlertBarChartDashlet
        # once it's available and simplify to:
        # abcd_vs = AlertBarChartDashlet.vs_parameters()
        abcd_vs = vs.Dictionary([
            ("time_range", vs.Timerange(title="Time range")),
            ("time_resolution",
             vs.DropdownChoice(title="Time resolution",
                               choices=[("h", "Show alerts per hour"),
                                        ("d", "Show alerts per day")])),
        ])
        abcd_vs._render = "oneline"
        assert abcd_vs.value_to_text(value) == result
        json_value = abcd_vs.value_to_json(value)
        assert abcd_vs.value_from_json(json_value) == value