예제 #1
0
def pending_active_services():
    """Return [(site, "host;service"), ...] for all active services that
    have never been checked yet.

    Uses try/finally so the prepend_site flag is always reset even when
    the Livestatus query raises (same pattern as get_stats_per_site()).
    """
    sites.live().set_prepend_site(True)
    try:
        services = sites.live().query(
            "GET services\n"
            "Filter: has_been_checked = 0\n"
            "Filter: check_type = 0\n"
            "Columns: host_name description")
    finally:
        # Reset the flag unconditionally so a failing query does not leak
        # the prepend-site mode into later queries.
        sites.live().set_prepend_site(False)
    return [(site, "%s;%s" % (host_name, service_description))
            for (site, host_name, service_description) in services]
예제 #2
0
def page_show():
    """Page entry point: show logwatch pages or process an acknowledgement.

    Dispatches on the HTML request vars "site" (optional hint), "host" and
    "file" to either acknowledge logs, show the global log list, one host's
    log list, or a single logfile.
    """
    site = html.var("site")  # optional site hint
    host_name = html.var("host", "")
    file_name = html.var("file", "")

    # Fix problem when URL is missing certain illegal characters
    try:
        file_name = form_file_to_ext(
            find_matching_logfile(site, host_name,
                                  form_file_to_int(file_name)))
    except livestatus.MKLivestatusNotFoundError:
        pass  # host_name log dir does not exist

    # Acknowledging logs is supported on
    # a) all logs on all hosts
    # b) all logs on one host_name
    # c) one log on one host_name
    if html.has_var('_ack') and not html.var("_do_actions") == _("No"):
        sites.live().set_auth_domain('action')
        do_log_ack(site, host_name, file_name)
        return

    if not host_name:
        show_log_list()
        return

    if file_name:
        show_file(site, host_name, file_name)
    else:
        show_host_log_list(site, host_name)
예제 #3
0
def page_show():
    """Render the logwatch page or process a log acknowledgement.

    Dispatch is driven by the request vars "site" (optional hint), "host"
    and "file".
    """
    site = html.var("site")  # optional site hint
    host_name = html.var("host", "")
    file_name = html.var("file", "")

    # The URL may be missing characters that are illegal in logfile names;
    # map the request back onto an existing logfile if possible.
    try:
        internal_name = form_file_to_int(file_name)
        matched = find_matching_logfile(site, host_name, internal_name)
        file_name = form_file_to_ext(matched)
    except livestatus.MKLivestatusNotFoundError:
        pass  # no log directory for this host

    # Acknowledging is supported for: all logs on all hosts, all logs on
    # one host, or a single log on a single host.
    wants_ack = html.has_var('_ack') and html.var("_do_actions") != _("No")
    if wants_ack:
        sites.live().set_auth_domain('action')
        do_log_ack(site, host_name, file_name)
    elif not host_name:
        show_log_list()
    elif file_name:
        show_file(site, host_name, file_name)
    else:
        show_host_log_list(site, host_name)
예제 #4
0
def pending_active_services():
    """Return [(site, "host;service"), ...] for all active services that
    have never been checked yet.

    The prepend_site flag is reset in a finally block so a failing query
    cannot leak the flag into later queries (same pattern as
    get_stats_per_site()).
    """
    sites.live().set_prepend_site(True)
    try:
        services = sites.live().query(
            "GET services\n"
            "Filter: has_been_checked = 0\n"
            "Filter: check_type = 0\n"
            "Columns: host_name description")
    finally:
        sites.live().set_prepend_site(False)
    return [(site, "%s;%s" % (host_name, service_description))
            for (site, host_name, service_description) in services]
예제 #5
0
def get_stats_per_site(only_sites, stats_keys):
    """Yield one {stats_key: value} dict per site for the Event Console
    status table.

    only_sites: site restriction passed to Livestatus (None = all sites).
    stats_keys: names of the eventconsolestatus columns to query.
    """
    try:
        sites.live().set_only_sites(only_sites)
        for list_row in sites.live().query(
                "GET eventconsolestatus\nColumns: %s" % " ".join(stats_keys)):
            yield dict(zip(stats_keys, list_row))
    finally:
        # Always remove the site restriction, even if the query fails.
        sites.live().set_only_sites(None)
예제 #6
0
def all_logs():
    """Return (site, host_name, mk_logwatch_files) rows for all hosts.

    The prepend_site flag is reset in a finally block so an exception in
    the query cannot leave site-prepending enabled for later queries.
    """
    sites.live().set_prepend_site(True)
    try:
        rows = sites.live().query(
            "GET hosts\n"
            "Columns: name mk_logwatch_files\n"
        )
    finally:
        sites.live().set_prepend_site(False)
    return rows
def all_logs():
    """Return (site, host_name, mk_logwatch_files) rows for all hosts.

    try/finally guarantees the prepend_site flag is reset even when the
    Livestatus query raises.
    """
    sites.live().set_prepend_site(True)
    try:
        rows = sites.live().query(
            "GET hosts\n"
            "Columns: name mk_logwatch_files\n"
        )
    finally:
        sites.live().set_prepend_site(False)
    return rows
예제 #8
0
def execute_command(name, args=None, site=None):
    """Send an Event Console command ("EC_<name>") via Livestatus.

    name: the command name without the "EC_" prefix.
    args: optional list of string arguments, appended ";"-separated.
    site: optional target site for the command.
    """
    arg_suffix = ";" + ";".join(args) if args else ""
    timestamp = int(time.time())
    sites.live().command("[%d] EC_%s%s" % (timestamp, name, arg_suffix), site)
예제 #9
0
def execute_command(name, args=None, site=None):
    """Send an Event Console command ("EC_<name>") via Livestatus.

    name: command name without the "EC_" prefix.
    args: optional list of string arguments, joined with ";".
    site: optional site the command is sent to.
    """
    if args:
        formated_args = ";" + ";".join(args)
    else:
        formated_args = ""

    # Livestatus external commands carry a leading "[<unix timestamp>]".
    query = "[%d] EC_%s%s" % (int(time.time()), name, formated_args)
    sites.live().command(query, site)
예제 #10
0
def action_reschedule():
    """AJAX action: force an immediate recheck of a host or service and
    wait for the result.

    Reads "site", "host", "service" and "wait_svc" from the request, sends
    a SCHEDULE_FORCED_*_CHECK command and then issues a Livestatus wait
    query until the object has a fresh check result (or the configured
    reschedule timeout elapses). Writes a JSON-ish status line to the
    response.
    """
    if not config.may("action.reschedule"):
        raise MKGeneralException("You are not allowed to reschedule checks.")

    site = html.var("site")
    host = html.var("host", "")
    if not host:
        raise MKGeneralException("Action reschedule: missing host name")

    service  = html.get_unicode_input("service",  "")
    wait_svc = html.get_unicode_input("wait_svc", "")

    if service:
        cmd = "SVC"
        what = "service"
        spec = "%s;%s" % (host, service.encode("utf-8"))

        # wait_svc allows waiting on a different service than the one
        # that was rescheduled (e.g. a passive service fed by Check_MK).
        if wait_svc:
            wait_spec = u'%s;%s' % (host, wait_svc)
            add_filter = "Filter: service_description = %s\n" % lqencode(wait_svc)
        else:
            wait_spec = spec
            add_filter = "Filter: service_description = %s\n" % lqencode(service)
    else:
        cmd = "HOST"
        what = "host"
        spec = host
        wait_spec = spec
        add_filter = ""

    try:
        now = int(time.time())
        sites.live().command("[%d] SCHEDULE_FORCED_%s_CHECK;%s;%d" % (now, cmd, lqencode(spec), now), site)
        sites.live().set_only_sites([site])
        # Block until the object has been checked since "now", at most
        # reschedule_timeout seconds (WaitTimeout is in milliseconds).
        query = u"GET %ss\n" \
                "WaitObject: %s\n" \
                "WaitCondition: last_check >= %d\n" \
                "WaitTimeout: %d\n" \
                "WaitTrigger: check\n" \
                "Columns: last_check state plugin_output\n" \
                "Filter: host_name = %s\n%s" \
                % (what, lqencode(wait_spec), now, config.reschedule_timeout * 1000, lqencode(host), add_filter)
        row = sites.live().query_row(query)
        sites.live().set_only_sites()
        last_check = row[0]
        if last_check < now:
            html.write("['TIMEOUT', 'Check not executed within %d seconds']\n" % (config.reschedule_timeout))
        else:
            if service == "Check_MK":
                # Passive services triggered by Check_MK often are updated
                # a few ms later. We introduce a small wait time in order
                # to increase the chance for the passive services already
                # updated also when we return.
                time.sleep(0.7);
            html.write("['OK', %d, %d, %r]\n" % (row[0], row[1], row[2].encode("utf-8")))

    except Exception, e:
        # Make sure the site restriction does not leak on errors.
        sites.live().set_only_sites()
        raise MKGeneralException(_("Cannot reschedule check: %s") % e)
예제 #11
0
def wait_for_pending(what, generator_function, tries):
    """Force-schedule checks for pending objects until none remain.

    what: "host" or "service" (used in the Livestatus command name).
    generator_function: callable returning [(site, spec), ...] of the
        objects that are still pending.
    tries: maximum number of reschedule rounds before giving up.
    """
    entries = generator_function()
    for try_number in range(tries):
        for site, entry in entries:
            # Use the actual current time as the command timestamp instead
            # of the hard-coded placeholder value "1231231233".
            sites.live().command(
                "[%d] SCHEDULE_FORCED_%s_CHECK;%s;%d" %
                (int(time.time()), what.upper(), entry, time.time()),
                sitename=site)
        time.sleep(0.3)
        entries = generator_function()
        if not entries:
            html.message("All %ss are checked.\n" % what)
            break

    else:
        # for/else: reached only when no break happened, i.e. still pending.
        html.message("Reschedule failed after %d tries. Still pending %ss: %s\n" % (tries, what, ", ".join([e[1] for e in entries])))
예제 #12
0
def wait_for_pending(what, generator_function, tries):
    """Force-schedule checks for pending objects until none remain.

    what: "host" or "service" (used in the Livestatus command name).
    generator_function: callable returning [(site, spec), ...] of the
        objects that are still pending.
    tries: maximum number of reschedule rounds before giving up.
    """
    entries = generator_function()
    for try_number in range(tries):
        for site, entry in entries:
            # The command timestamp must be the current time, not the
            # hard-coded placeholder "1231231233" of the original code.
            sites.live().command(
                "[%d] SCHEDULE_FORCED_%s_CHECK;%s;%d" %
                (int(time.time()), what.upper(), entry, time.time()),
                sitename=site)
        time.sleep(0.3)
        entries = generator_function()
        if not entries:
            html.message("All %ss are checked.\n" % what)
            break

    else:
        # Loop exhausted without break: some objects are still pending.
        html.message("Reschedule failed after %d tries. Still pending %ss: %s\n" % (tries, what, ", ".join([e[1] for e in entries])))
예제 #13
0
def load_failed_notifications(before=None,
                              after=None,
                              stat_only=False,
                              extra_headers=None):
    """Fetch "SERVICE NOTIFICATION RESULT" log entries with non-OK state.

    before/after: optional unix timestamps bounding the log time range.
    stat_only: if True, return summed stats (failure count) instead of
        the raw log rows.
    extra_headers: raw LQL header text appended verbatim to the query.
    Returns None when the user may not see failed notifications at all.
    """
    may_see_notifications =\
        config.user.may("general.see_failed_notifications") or\
        config.user.may("general.see_failed_notifications_24h")

    if not may_see_notifications:
        return None

    query_filters = [
        "class = 3",
        "log_type = SERVICE NOTIFICATION RESULT",
    ]

    if before is not None:
        query_filters.append("time <= %d" % before)
    if after is not None:
        query_filters.append("time >= %d" % after)
    # Users holding only the 24h permission are limited to the last day.
    if may_see_notifications and not config.user.may(
            "general.see_failed_notifications"):
        query_filters.append("time > %d" % (int(time.time()) - 86400))

    query = ["GET log"]
    if stat_only:
        query.append("Stats: log_state != 0")
    else:
        query.append("Columns: %s" % " ".join(g_columns))
        query_filters.append("log_state != 0")
    query += ["Filter: %s" % filt for filt in query_filters]

    query = "\n".join(query)

    if extra_headers is not None:
        query += extra_headers

    if stat_only:
        result = sites.live().query_summed_stats(query)
        if result is None:
            result = [0]  # Normalize the result when no site answered

        if result[0] == 0 and not sites.live().dead_sites():
            # In case there are no errors and all sites are reachable:
            # advance the users acknowledgement time
            acknowledge_failed_notifications(time.time())

        return result

    else:
        return sites.live().query(query)
예제 #14
0
def ajax_switch_masterstate():
    """AJAX handler: toggle one of the core's global switches.

    Expects the HTML vars "site", "switch" (status column name) and
    "state" (0 or 1), sends the matching external command, waits until the
    core reflects the new state and re-renders the master control snapin.
    """
    site = html.var("site")
    column = html.var("switch")
    state = int(html.var("state"))
    # (status column, target state) -> external command name
    commands = {
        ("enable_notifications", 1): "ENABLE_NOTIFICATIONS",
        ("enable_notifications", 0): "DISABLE_NOTIFICATIONS",
        ("execute_service_checks", 1): "START_EXECUTING_SVC_CHECKS",
        ("execute_service_checks", 0): "STOP_EXECUTING_SVC_CHECKS",
        ("execute_host_checks", 1): "START_EXECUTING_HOST_CHECKS",
        ("execute_host_checks", 0): "STOP_EXECUTING_HOST_CHECKS",
        ("enable_flap_detection", 1): "ENABLE_FLAP_DETECTION",
        ("enable_flap_detection", 0): "DISABLE_FLAP_DETECTION",
        ("process_performance_data", 1): "ENABLE_PERFORMANCE_DATA",
        ("process_performance_data", 0): "DISABLE_PERFORMANCE_DATA",
        ("enable_event_handlers", 1): "ENABLE_EVENT_HANDLERS",
        ("enable_event_handlers", 0): "DISABLE_EVENT_HANDLERS",
    }

    command = commands.get((column, state))
    if command:
        sites.live().command("[%d] %s" % (int(time.time()), command), site)
        sites.live().set_only_sites([site])
        try:
            # Wait (up to 10s) until the core reports the new state.
            sites.live().query("GET status\nWaitTrigger: program\nWaitTimeout: 10000\nWaitCondition: %s = %d\nColumns: %s\n" % \
                   (column, state, column))
        finally:
            # Reset the site restriction even when the wait query fails,
            # so it cannot leak into later queries.
            sites.live().set_only_sites()
        render_master_control()
    else:
        html.write(
            _("Command %s/%d not found") % (html.attrencode(column), state))
예제 #15
0
def get_logfile_lines(site, host_name, file_name):
    """Return the content of one logwatch logfile as a list of lines, or
    None when the file is not available.

    site: optional site hint restricting the Livestatus query.
    """
    if site:  # Honor site hint if available
        sites.live().set_only_sites([site])
    try:
        # Backslashes and spaces must be escaped inside the column name.
        query = \
            "GET hosts\n" \
            "Columns: mk_logwatch_file:file:%s\n" \
            "Filter: name = %s\n" % (
                lqencode(file_name.replace('\\', '\\\\').replace(' ', '\\s')),
                lqencode(host_name))
        file_content = sites.live().query_value(query)
    finally:
        # Reset the restriction even if the query fails.
        if site:
            sites.live().set_only_sites(None)
    if file_content is None:  # "is None" instead of "== None"
        return None
    return file_content.splitlines()
예제 #16
0
def get_logfile_lines(site, host_name, file_name):
    """Return the content of one logwatch logfile as a list of lines, or
    None when the file is not available.

    site: optional site hint restricting the Livestatus query.
    """
    if site:  # Honor site hint if available
        sites.live().set_only_sites([site])
    try:
        query = \
            "GET hosts\n" \
            "Columns: mk_logwatch_file:file:%s\n" \
            "Filter: name = %s\n" % (
                lqencode(file_name.replace('\\', '\\\\').replace(' ', '\\s')),
                lqencode(host_name))
        file_content = sites.live().query_value(query)
    finally:
        # Always reset the site restriction, even on query errors.
        if site:
            sites.live().set_only_sites(None)
    if file_content is None:  # idiomatic identity check instead of "== None"
        return None
    return file_content.splitlines()
예제 #17
0
def ajax_switch_masterstate():
    """AJAX handler: toggle one of the core's global switches (e.g.
    notifications, check execution) and re-render the master control.
    """
    site = html.var("site")
    column = html.var("switch")
    state = int(html.var("state"))
    # Maps (status column, desired state) to the external command to send.
    commands = {
        ("enable_notifications", 1): "ENABLE_NOTIFICATIONS",
        ("enable_notifications", 0): "DISABLE_NOTIFICATIONS",
        ("execute_service_checks", 1): "START_EXECUTING_SVC_CHECKS",
        ("execute_service_checks", 0): "STOP_EXECUTING_SVC_CHECKS",
        ("execute_host_checks", 1): "START_EXECUTING_HOST_CHECKS",
        ("execute_host_checks", 0): "STOP_EXECUTING_HOST_CHECKS",
        ("enable_flap_detection", 1): "ENABLE_FLAP_DETECTION",
        ("enable_flap_detection", 0): "DISABLE_FLAP_DETECTION",
        ("process_performance_data", 1): "ENABLE_PERFORMANCE_DATA",
        ("process_performance_data", 0): "DISABLE_PERFORMANCE_DATA",
        ("enable_event_handlers", 1): "ENABLE_EVENT_HANDLERS",
        ("enable_event_handlers", 0): "DISABLE_EVENT_HANDLERS",
    }

    command = commands.get((column, state))
    if command:
        sites.live().command("[%d] %s" % (int(time.time()), command), site)
        sites.live().set_only_sites([site])
        # Wait (up to 10s) until the core reflects the new state before
        # rendering the updated control.
        sites.live().query(
            "GET status\nWaitTrigger: program\nWaitTimeout: 10000\nWaitCondition: %s = %d\nColumns: %s\n"
            % (column, state, column)
        )
        sites.live().set_only_sites()
        render_master_control()
    else:
        html.write(_("Command %s/%d not found") % (html.attrencode(column), state))
예제 #18
0
def get_user_overview_data(extra_filter_headers):
    """Count the current user's acknowledgement comments and the active
    scheduled downtimes, summed over all sites.

    Returns a 4-tuple (host_comments, service_comments, host_downtimes,
    service_downtimes) of summed-stats lists, or (None, None, None, None)
    when a query target does not exist.

    NOTE(review): extra_filter_headers is not referenced in this body -
    confirm whether it should be appended to the queries.
    """

    host_comment_query     = "GET comments\n" \
                             "Stats: author = %s\n" \
                             "Stats: host_acknowledged > 0\n" \
                             "StatsAnd: 2\n" % (config.user.id)

    svc_comment_query      = "GET comments\n" \
                             "Stats: author = %s\n" \
                             "Stats: service_acknowledged > 0\n" \
                             "StatsAnd: 2\n" % (config.user.id)

    # The Nagios core has no column "downtime_recurring", so we have to
    # use different LQL for different cores.

    if cmk.paths._get_core_name() == "cmc":
        down_host_query    = "GET downtimes\n" \
                             "Stats: host_scheduled_downtime_depth > 0\n" \
                             "Stats: downtime_recurring = 0\n" \
                             "Stats: service_scheduled_downtime_depth = 0\n" \
                             "StatsAnd: 3\n"

        down_service_query = "GET downtimes\n" \
                             "Stats: service_scheduled_downtime_depth > 0\n" \
                             "Stats: downtime_recurring = 0\n" \
                             "Stats: host_scheduled_downtime_depth = 0\n" \
                             "StatsAnd: 3\n"

    else:
        down_host_query    = "GET downtimes\n" \
                             "Stats: host_scheduled_downtime_depth > 0\n" \
                             "Stats: service_scheduled_downtime_depth = 0\n" \
                             "StatsAnd: 2\n"

        down_service_query = "GET downtimes\n" \
                             "Stats: service_scheduled_downtime_depth > 0\n" \
                             "Stats: host_scheduled_downtime_depth = 0\n" \
                             "StatsAnd: 2\n"

    try:
        host_commentdata = sites.live().query_summed_stats(host_comment_query)
        svc_commentdata = sites.live().query_summed_stats(svc_comment_query)
        down_host = sites.live().query_summed_stats(down_host_query)
        down_service = sites.live().query_summed_stats(down_service_query)

    except livestatus.MKLivestatusNotFoundError:
        return None, None, None, None
    else:
        return host_commentdata, svc_commentdata, down_host, down_service
def logfiles_of_host(site, host_name):
    """Return the list of logwatch file names known for host_name.

    site: optional site hint restricting the Livestatus query.
    Raises MKGeneralException when the target core is too old to provide
    the mk_logwatch_files column.
    """
    if site: # Honor site hint if available
        sites.live().set_only_sites([site])
    try:
        file_names = sites.live().query_value(
            "GET hosts\n"
            "Columns: mk_logwatch_files\n"
            "Filter: name = %s\n" % lqencode(host_name))
    finally:
        # Reset the restriction even when the query fails.
        if site:
            sites.live().set_only_sites(None)
    if file_names is None: # Not supported by that Livestatus version
        raise MKGeneralException(
            _("The monitoring core of the target site '%s' has the version '%s'. That "
              "does not support fetching logfile information. Please upgrade "
              "to a newer version.") % (site, sites.state(site)["program_version"]))
    return file_names
예제 #20
0
def logfiles_of_host(site, host_name):
    """Return the list of logwatch file names known for host_name.

    site: optional site hint restricting the Livestatus query.
    Raises MKGeneralException when the target core does not provide the
    mk_logwatch_files column.
    """
    if site: # Honor site hint if available
        sites.live().set_only_sites([site])
    try:
        file_names = sites.live().query_value(
            "GET hosts\n"
            "Columns: mk_logwatch_files\n"
            "Filter: name = %s\n" % lqencode(host_name))
    finally:
        # Always remove the site restriction, even on query errors.
        if site:
            sites.live().set_only_sites(None)
    if file_names is None: # idiomatic identity check instead of "== None"
        raise MKGeneralException(
            _("The monitoring core of the target site '%s' has the version '%s'. That "
              "does not support fetching logfile information. Please upgrade "
              "to a newer version.") % (site, sites.state(site)["program_version"]))
    return file_names
def may_see(site, host_name):
    """Check whether the current user may see host_name.

    Users with "general.see_all" always may; otherwise a Livestatus host
    count (implicitly restricted by AuthUser) decides.
    """
    if config.user.may("general.see_all"):
        return True

    host_found = False
    try:
        if site:
            sites.live().set_only_sites([site])
        # Note: This query won't work in a distributed setup and no site given as argument
        # livestatus connection is setup with AuthUser
        host_found = sites.live().query_value("GET hosts\nStats: state >= 0\nFilter: name = %s\n" % lqencode(host_name)) > 0
    finally:
        # Remove the restriction in all cases, even on query failure.
        sites.live().set_only_sites(None)

    return host_found
예제 #22
0
 def heading_info(self):
     """Return the alias of the currently selected group for the page
     heading, or None when no group is selected.
     """
     current_value = self.current_value()
     if current_value:
         # host_contact/service_contact filters all query the plain
         # contact groups table.
         table = self.what.replace("host_contact", "contact").replace("service_contact", "contact")
         # NOTE(review): current_value is passed as the second argument to
         # query_value() - presumably the fallback when no row matches, so
         # an unknown group falls back to its raw name; confirm.
         alias = sites.live().query_value("GET %sgroups\nCache: reload\nColumns: alias\nFilter: name = %s\n" %
             (table, lqencode(current_value)), current_value)
         return alias
예제 #23
0
 def heading_info(self):
     """Return the alias of the currently selected group for the page
     heading, or None when no group is selected.
     """
     current_value = self.current_value()
     if current_value:
         # Contact-related filter variants share the plain contact groups
         # table.
         table = self.what.replace("host_contact", "contact").replace("service_contact", "contact")
         # NOTE(review): the trailing current_value argument appears to be
         # a default for query_value() when no row matches - confirm.
         alias = sites.live().query_value("GET %sgroups\nCache: reload\nColumns: alias\nFilter: name = %s\n" %
             (table, lqencode(current_value)), current_value)
         return alias
예제 #24
0
def may_see(site, host_name):
    """Whether the current user is permitted to see host_name."""
    if config.user.may("general.see_all"):
        return True

    # The livestatus connection is set up with AuthUser, so simply
    # counting the host suffices as a permission check.
    lql = "GET hosts\nStats: state >= 0\nFilter: name = %s\n" % lqencode(host_name)
    return sites.live().query_value(lql) > 0
예제 #25
0
def get_current_perfdata(host, service, dsname):
    """Return the current value of the perfdata variable dsname of the
    given service, or None when the variable is not present.
    """
    perf_data = sites.live().query_value(
                    "GET services\nFilter: host_name = %s\nFilter: description = %s\n"
                    "Columns: perf_data" % (lqencode(host), lqencode(service)))

    for part in perf_data.split():
        # Split on the first "=" only: a value part containing "=" would
        # otherwise raise a ValueError on tuple unpacking.
        name, rest = part.split("=", 1)
        if name == dsname:
            # perfdata format is "value;warn;crit;min;max" - take the value.
            return float(rest.split(";")[0])
예제 #26
0
def get_current_perfdata(host, service, dsname):
    """Look up the current value of perfdata variable dsname of a service.

    Returns the value as float, or None if the variable does not occur in
    the service's perfdata.
    """
    lql = ("GET services\nFilter: host_name = %s\nFilter: description = %s\n"
           "Columns: perf_data" % (lqencode(host), lqencode(service)))
    perf_data = sites.live().query_value(lql)

    for entry in perf_data.split():
        varname, value_spec = entry.split("=")
        if varname == dsname:
            # "value;warn;crit;min;max" - only the value is of interest.
            return float(value_spec.split(";")[0])
예제 #27
0
def ajax_speedometer():
    """AJAX page: compute data for the service-check-rate speedometer.

    Reads the previous needle position and rates from the request so the
    needle can be animated smoothly between calls; on any error the values
    fall back to zero and the title carries the error text.
    """
    try:
        # Try to get values from the last call in order to compute a
        # drifting speedometer-needle and to reuse the scheduled
        # check rate.
        last_perc = float(html.var("last_perc"))
        scheduled_rate = float(html.var("scheduled_rate"))
        last_program_start = int(html.var("program_start"))

        # Get the current rates and the program start time. If there
        # are more than one site, we simply add the start times.
        data = sites.live().query_summed_stats(
            "GET status\n"
            "Columns: service_checks_rate program_start")
        current_rate = data[0]
        program_start = data[1]

        # Recompute the scheduled_rate only if it is not known (first call)
        # or if one of the sites has been restarted. The computed value cannot
        # change during the monitoring since it just reflects the configuration.
        # That way we save CPU resources since the computation of the
        # scheduled checks rate needs to loop over all hosts and services.
        if last_program_start != program_start:
            # These days, we configure the correct check interval for Check_MK checks.
            # We do this correctly for active and for passive ones. So we can simply
            # use the check_interval of all services. Hosts checks are ignored.
            #
            # Manually added services without check_interval could be a problem, but
            # we have no control there.
            scheduled_rate = sites.live().query_summed_stats(
                "GET services\n"
                "Stats: suminv check_interval\n")[0] / 60.0

        percentage = 100.0 * current_rate / scheduled_rate
        title = _("Scheduled service check rate: %.1f/s, current rate: %.1f/s, that is "
                  "%.0f%% of the scheduled rate") % \
                  (scheduled_rate, current_rate, percentage)

    except Exception, e:
        # Degrade gracefully: show zeroed gauge with the error as title.
        scheduled_rate = 0
        program_start = 0
        percentage = 0
        last_perc = 0
        title = _("No performance data: %s") % e
예제 #28
0
def ajax_speedometer():
    """AJAX page: compute data for the service-check-rate speedometer.

    Reuses the values of the previous call (passed as request vars) to
    animate the needle and avoid recomputing the scheduled check rate.
    """
    try:
        # Try to get values from the last call in order to compute a
        # drifting speedometer-needle and to reuse the scheduled
        # check rate.
        last_perc          = float(html.var("last_perc"))
        scheduled_rate     = float(html.var("scheduled_rate"))
        last_program_start = int(html.var("program_start"))

        # Get the current rates and the program start time. If there
        # are more than one site, we simply add the start times.
        data = sites.live().query_summed_stats("GET status\n"
               "Columns: service_checks_rate program_start")
        current_rate = data[0]
        program_start = data[1]

        # Recompute the scheduled_rate only if it is not known (first call)
        # or if one of the sites has been restarted. The computed value cannot
        # change during the monitoring since it just reflects the configuration.
        # That way we save CPU resources since the computation of the
        # scheduled checks rate needs to loop over all hosts and services.
        if last_program_start != program_start:
            # These days, we configure the correct check interval for Check_MK checks.
            # We do this correctly for active and for passive ones. So we can simply
            # use the check_interval of all services. Hosts checks are ignored.
            #
            # Manually added services without check_interval could be a problem, but
            # we have no control there.
            scheduled_rate = sites.live().query_summed_stats(
                        "GET services\n"
                        "Stats: suminv check_interval\n")[0] / 60.0

        percentage = 100.0 * current_rate / scheduled_rate;
        title = _("Scheduled service check rate: %.1f/s, current rate: %.1f/s, that is "
                  "%.0f%% of the scheduled rate" %
                  (scheduled_rate, current_rate, percentage))

    except Exception, e:
        # Degrade gracefully: zero the gauge and put the error in the title.
        scheduled_rate = 0
        program_start = 0
        percentage = 0
        last_perc = 0
        title = _("No performance data: ") + str(e)
예제 #29
0
def may_see(host_name, site=None):
    """Check whether the current user is permitted to see host_name.

    site: optional site to restrict the Livestatus query to.
    """
    if config.user.may("general.see_all"):
        return True

    lql = "GET hosts\nStats: state >= 0\nFilter: name = %s\n" % lqencode(host_name)

    if site:
        sites.live().set_only_sites([site])

    # Summed host count over all (or the one selected) sites; AuthUser on
    # the livestatus connection already limits visibility.
    stats = sites.live().query_summed_stats(lql, "ColumnHeaders: off\n")

    if site:
        sites.live().set_only_sites()

    return bool(stats) and stats[0] > 0
예제 #30
0
def may_see(host_name, site=None):
    """Return True if the current user is allowed to see host_name.

    site: optional site to restrict the Livestatus query to.
    """
    if config.user.may("general.see_all"):
        return True

    query = "GET hosts\nStats: state >= 0\nFilter: name = %s\n" % lqencode(
        host_name)

    if site:
        sites.live().set_only_sites([site])

    # The livestatus connection is set up with AuthUser, so a host count
    # greater than zero means the user may see the host.
    result = sites.live().query_summed_stats(query, "ColumnHeaders: off\n")

    if site:
        sites.live().set_only_sites()

    if not result:
        return False
    else:
        return result[0] > 0
예제 #31
0
def load_failed_notifications(before=None,
                              after=None,
                              stat_only=False,
                              extra_headers=None):
    """Fetch "SERVICE NOTIFICATION RESULT" log entries with non-OK state.

    before/after: optional unix timestamps bounding the log time range.
    stat_only: if True, return summed stats (failure count) instead of
        the raw log rows.
    extra_headers: raw LQL header text appended verbatim to the query.
    Returns None when the user may not see failed notifications at all.
    """
    may_see_notifications =\
        config.user.may("general.see_failed_notifications") or\
        config.user.may("general.see_failed_notifications_24h")

    if not may_see_notifications:
        return None

    query_filters = [
        "class = 3",
        "log_type = SERVICE NOTIFICATION RESULT",
    ]

    if before is not None:
        query_filters.append("time <= %d" % before)
    if after is not None:
        query_filters.append("time >= %d" % after)
    # Users holding only the 24h permission are limited to the last day.
    if may_see_notifications and not config.user.may(
            "general.see_failed_notifications"):
        query_filters.append("time > %d" % (int(time.time()) - 86400))

    query = ["GET log"]
    if stat_only:
        query.append("Stats: log_state != 0")
    else:
        query.append("Columns: %s" % " ".join(g_columns))
        query_filters.append("log_state != 0")
    query += ["Filter: %s" % filt for filt in query_filters]

    query = "\n".join(query)

    if extra_headers is not None:
        query += extra_headers

    if stat_only:
        return sites.live().query_summed_stats(query)
    else:
        return sites.live().query(query)
예제 #32
0
def dashlet_graph_reload_js(nr, dashlet):
    """Return the JS call that (re-)renders the graph dashlet number nr.

    dashlet: the dashlet config dict; its 'context' must contain 'host'
    and may contain 'service'.
    Raises MKUserError when the host is missing or cannot be located.
    """
    host = dashlet['context'].get('host')
    if not host:
        raise MKUserError('host', _('Missing needed host parameter.'))

    service = dashlet['context'].get('service')
    if not service:
        service = "_HOST_"

    # When the site is available via URL context, use it. Otherwise it is needed
    # to check all sites for the requested host
    if html.has_var('site'):
        site = html.var('site')
    else:
        sites.live().set_prepend_site(True)
        try:
            query = "GET hosts\nFilter: name = %s\nColumns: name" % lqencode(host)
            try:
                site = sites.live().query_column(query)[0]
            except IndexError:
                raise MKUserError(
                    "host", _("The host could not be found on any active site."))
        finally:
            # Reset even when the lookup fails, so the prepend-site mode
            # does not leak into later queries.
            sites.live().set_prepend_site(False)

    # New graphs which have been added via "add to visual" option don't have a timerange
    # configured. So we assume the default timerange here by default.
    timerange = dashlet.get('timerange', '1')

    graph_specification = ("template", {
        "site": site,
        "host_name": host,
        "service_description": service,
        "graph_index": dashlet["source"] - 1,
    })
    graph_render_options = {
        "show_legend": dashlet.get("show_legend", False),
        "show_service": dashlet.get("show_service", True),
    }

    return "dashboard_render_graph(%d, %s, %s, '%s')" % \
            (nr, json.dumps(graph_specification), json.dumps(graph_render_options), timerange)
예제 #33
0
def get_crash_report_archive_as_string(site, host, service):
    query = "GET services\n" \
            "Filter: host_name = %s\n" \
            "Filter: service_description = %s\n" \
            "Columns: long_plugin_output\n" % (
            lqencode(host), lqencode(service))

    sites.live().set_only_sites([site])
    data = sites.live().query_value(query)
    sites.live().set_only_sites()

    if not data.startswith("Crash dump:\\n"):
        raise MKGeneralException(
            "No crash dump is available for this service.")
    encoded_tardata = data[13:].rstrip()
    if encoded_tardata.endswith("\\n"):
        encoded_tardata = encoded_tardata[:-2]

    try:
        return base64.b64decode(encoded_tardata)
    except Exception, e:
        raise MKGeneralException("Encoded crash dump data is invalid: %s" % e)
예제 #34
0
def dashlet_graph_reload_js(nr, dashlet):
    """Return the JS call that (re-)renders the graph dashlet number nr.

    dashlet: the dashlet config dict; its 'context' must contain 'host'
    and may contain 'service'.
    Raises MKUserError when the host is missing or cannot be located.
    """
    host = dashlet['context'].get('host')
    if not host:
        raise MKUserError('host', _('Missing needed host parameter.'))

    service = dashlet['context'].get('service')
    if not service:
        service = "_HOST_"

    # When the site is available via URL context, use it. Otherwise it is needed
    # to check all sites for the requested host
    if html.has_var('site'):
        site = html.var('site')
    else:
        sites.live().set_prepend_site(True)
        try:
            query = "GET hosts\nFilter: name = %s\nColumns: name" % lqencode(host)
            try:
                site = sites.live().query_column(query)[0]
            except IndexError:
                raise MKUserError("host", _("The host could not be found on any active site."))
        finally:
            # Reset unconditionally so a failed lookup cannot leave
            # site-prepending enabled for later queries.
            sites.live().set_prepend_site(False)

    # New graphs which have been added via "add to visual" option don't have a timerange
    # configured. So we assume the default timerange here by default.
    timerange = dashlet.get('timerange', '1')

    graph_specification = ("template", {
        "site"                : site,
        "host_name"           : host,
        "service_description" : service,
        "graph_index"         : dashlet["source"] -1,
    })
    graph_render_options = {
        "show_legend": dashlet.get("show_legend", False),
        "show_service" : dashlet.get("show_service", True),
    }

    return "dashboard_render_graph(%d, %s, %s, '%s')" % \
            (nr, json.dumps(graph_specification), json.dumps(graph_render_options), timerange)
예제 #35
0
def load_failed_notifications(before=None, after=None, stat_only=False, extra_headers=None):
    """Fetch "SERVICE NOTIFICATION RESULT" log entries with non-OK state.

    before/after: optional unix timestamps bounding the log time range.
    stat_only: if True, return summed stats (failure count) instead of
        the raw log rows.
    extra_headers: raw LQL header text appended verbatim to the query.
    Returns None when the user may not see failed notifications at all.
    """
    may_see_notifications =\
        config.may("general.see_failed_notifications") or\
        config.may("general.see_failed_notifications_24h")

    if not may_see_notifications:
        return None

    query_filters = [
        "class = 3",
        "log_type = SERVICE NOTIFICATION RESULT",
    ]

    if before is not None:
        query_filters.append("time <= %d" % before)
    if after is not None:
        query_filters.append("time >= %d" % after)
    # Users holding only the 24h permission are limited to the last day.
    if may_see_notifications and not config.may("general.see_failed_notifications"):
        query_filters.append("time > %d" % (int(time.time()) - 86400))

    query = ["GET log"]
    if stat_only:
        query.append("Stats: log_state != 0")
    else:
        query.append("Columns: %s" % " ".join(g_columns))
        query_filters.append("log_state != 0")
    query += ["Filter: %s" % filt for filt in query_filters]

    query = "\n".join(query)

    if extra_headers is not None:
        query += extra_headers

    if stat_only:
        return sites.live().query_summed_stats(query)
    else:
        return sites.live().query(query)
예제 #36
0
    def _execute_livestatus_command(self):
        """Run the prepared quicksearch Livestatus query and store the
        resulting rows (as dicts keyed by column name, with the site
        prepended) on self._rows, truncated to the dropdown limit.
        """
        self._rows = []
        self._too_much_rows = False

        self._generate_livestatus_command()

        if not self._livestatus_command:
            return

        sites.live().set_prepend_site(True)
        try:
            results = sites.live().query(self._livestatus_command)
        finally:
            # Reset even when the query raises, so the prepend-site mode
            # cannot leak into later queries.
            sites.live().set_prepend_site(False)

        # Invalid livestatus response, missing headers..
        if not results:
            return

        headers = ["site"] + self._queried_livestatus_columns
        self._rows = map(lambda x: dict(zip(headers, x)), results)

        limit = config.quicksearch_dropdown_limit
        if len(self._rows) > limit:
            self._too_much_rows = True
            self._rows.pop()  # Remove limit+1nth element
예제 #37
0
def get_rrd_data(hostname, service_description, varname, cf, fromtime, untiltime):
    """Fetch RRD time series data for one service metric via livestatus.

    varname: perfdata variable name of the metric.
    cf: RRD consolidation function name, e.g. "MAX" or "AVERAGE".
    fromtime/untiltime: unix timestamps bounding the requested range.

    Raises MKGeneralException when the query fails (re-raises the
    original exception in debug mode).

    NOTE(review): ``response`` is fetched but never returned in this
    excerpt -- presumably the original code continues past this point.
    """
    step = 1
    rpn = "%s.%s" % (varname, cf.lower()) # "MAX" -> "max"
    query = "GET services\n" \
          "Columns: rrddata:m1:%s:%d:%d:%d\n" \
          "Filter: host_name = %s\n" \
          "Filter: description = %s\n" % (
             rpn, fromtime, untiltime, step,
             lqencode(hostname), lqencode(service_description))

    try:
        response = sites.live().query_row(query)[0]
    except Exception, e:  # Python 2 except syntax
        if config.debug:
            raise
        raise MKGeneralException("Cannot get historic metrics via Livestatus: %s" % e)
    def command_archive_events_of_hosts(cmdtag, spec, row):
        """Build Event Console DELETE commands archiving all events of the row's host.

        Only acts when the "_archive_events_of_hosts" form variable is set
        and the row's check command is a ``check_mk_active-mkevents``
        active check (i.e. the host/service mirrors EC events). Returns a
        (commands, title) pair in that case, otherwise None implicitly.
        """
        if html.var("_archive_events_of_hosts"):
            if cmdtag == "HOST":
                tag = "host"
            elif cmdtag == "SVC":
                tag = "service"
            else:
                tag = None

            commands = []
            if tag and row.get('%s_check_command' % tag, "").startswith('check_mk_active-mkevents'):
                # Encode the host name with lqencode, consistent with all
                # other livestatus queries in this file (prevents header
                # injection via the host name).
                data = sites.live().query("GET eventconsoleevents\n" +\
                                          "Columns: event_id\n" +\
                                          "Filter: host_name = %s" % \
                                          lqencode(row['host_name']))
                commands = [ "DELETE;%s;%s" % (entry[0], config.user.id) for entry in data ]
            return commands, "<b>archive all events of all hosts</b> of"
예제 #39
0
def get_rrd_data(hostname, service_description, varname, cf, fromtime,
                 untiltime):
    """Fetch RRD time series data for one service metric via livestatus.

    Reformatted duplicate of the earlier ``get_rrd_data`` variant.

    NOTE(review): ``response`` is fetched but never returned in this
    excerpt -- presumably the original code continues past this point.
    """
    step = 1
    rpn = "%s.%s" % (varname, cf.lower())  # "MAX" -> "max"
    query = "GET services\n" \
          "Columns: rrddata:m1:%s:%d:%d:%d\n" \
          "Filter: host_name = %s\n" \
          "Filter: description = %s\n" % (
             rpn, fromtime, untiltime, step,
             lqencode(hostname), lqencode(service_description))

    try:
        response = sites.live().query_row(query)[0]
    except Exception, e:  # Python 2 except syntax
        if config.debug:
            raise
        raise MKGeneralException(
            "Cannot get historic metrics via Livestatus: %s" % e)
예제 #40
0
File: wato.py  Project: ffeldhaus/check_mk
    def display(self):
        """Render the folder dropdown, restricted to folders the user may see.

        A WATO folder is visible iff the user has view permission for at
        least one host inside it; livestatus applies those permissions
        when counting hosts per configuration file.
        """
        self.check_wato_data_update()
        # Ask livestatus which host configuration files are visible.
        query = "GET hosts\nCache: reload\nColumns: filename\nStats: state >= 0\n"
        visible = set([""])
        for filename, _host_count in sites.live().query(query):
            # convert '/wato/server/hosts.mk' to 'server'
            folder_path = filename[6:-9]
            # A visible folder makes all of its ancestors visible too.
            prefix = ""
            for segment in folder_path.split("/"):
                prefix = prefix + "/" + segment if prefix else segment
                visible.add(prefix)

        choices = [("", "")]
        for entry in self.selection:
            if entry[0] in visible:
                choices.append(entry)
        html.select(self.name, choices)
예제 #41
0
    def filter(self, infoname):
        """Return the livestatus "Filter:" header for this group filter.

        Returns "" when the filter variable is absent, when nothing is
        selected and the filter is not enforced, or when no group of the
        matching kind exists at all.
        """
        # Filter variable not present in the request at all -> no filter.
        if not html.has_var(self.htmlvars[0]):
            return ""

        group_name = self.current_value()
        if not group_name:
            if not self.enforce:
                return ""
            # Enforced filter without a selection: fall back to the first
            # existing group of the matching kind.
            table = self.what.replace("host_contact", "contact") \
                             .replace("service_contact", "contact")
            group_name = sites.live().query_value(
                "GET %sgroups\nCache: reload\nColumns: name\nLimit: 1\n" % table, None)

        if group_name == None:
            return ""  # no group of this kind exists at all

        negate = "!" if not self.enforce and html.var(self.htmlvars[1]) else ""
        return "Filter: %s %s>= %s\n" % (self.what + "_groups", negate, lqencode(group_name))
예제 #42
0
    def filter(self, infoname):
        """Build the livestatus "Filter:" header for this group filter.

        infoname: unused here; part of the generic filter interface.
        Returns "" when no filtering should happen.
        """
        if not html.has_var(self.htmlvars[0]):
            return "" # Skip if filter is not being set at all

        current_value = self.current_value()
        if not current_value:
            if not self.enforce:
                return ""
            # Take first group with the name we search
            table = self.what.replace("host_contact", "contact").replace("service_contact", "contact")
            current_value = sites.live().query_value("GET %sgroups\nCache: reload\nColumns: name\nLimit: 1\n" % table, None)

        if current_value == None:
            return "" # no {what}group exists!

        col = self.what + "_groups"
        # htmlvars[1] presumably carries the "negate" checkbox; it is only
        # honored for non-enforced filters -- TODO confirm against callers.
        if not self.enforce and html.var(self.htmlvars[1]):
            negate = "!"
        else:
            negate = ""
        return "Filter: %s %s>= %s\n" % (col, negate, lqencode(current_value))
예제 #43
0
    def display(self):
        """Render the folder dropdown, restricted to folders the user may see.

        A WATO folder is visible iff the user has view permission for at
        least one host inside it (livestatus applies the permissions).
        """
        self.check_wato_data_update()
        # Note: WATO Folders that the user has not permissions to must not be visible.
        # Permissions in this case means, that the user has view permissions for at
        # least one host in that folder.
        result = sites.live().query(
            "GET hosts\nCache: reload\nColumns: filename\nStats: state >= 0\n")
        allowed_folders = set([""])
        for path, host_count in result:
            # convert '/wato/server/hosts.mk' to 'server'
            folder = path[6:-9]
            # allow the folder an all of its parents
            parts = folder.split("/")
            subfolder = ""
            for part in parts:
                if subfolder:
                    subfolder += "/"
                subfolder += part
                allowed_folders.add(subfolder)

        html.select(self.name, [("", "")] + [
            entry for entry in self.selection if (entry[0] in allowed_folders)
        ])
예제 #44
0
def search_livestatus(used_filters):
    """Run all quicksearch plugins against livestatus for the given filters.

    used_filters: list of (filter_type, value) pairs from the search field.
    Returns a deduplicated, optionally sorted list of
    (plugin, options, row_dict) entries, truncated to
    config.quicksearch_dropdown_limit.
    """
    limit = config.quicksearch_dropdown_limit

    # We need to know which plugin lead to finding a particular host, so it
    # is neccessary to make one query for each plugin - sorry. For example
    # for the case, that a host can be found via alias or name.
    data = []

    sites.live().set_prepend_site(True)
    for plugin in search_plugins:
        if 'filter_func' not in plugin:
            continue

        if not plugin_matches_filters(plugin, used_filters):
            continue

        lq_filter = plugin['filter_func'](used_filters)
        if lq_filter:
            lq_table = plugin.get("lq_table", plugin.get("id"))
            lq_columns = plugin.get("lq_columns")
            lq         = "GET %s\nCache: reload\nColumns: %s\n%sLimit: %d\n" % \
                          (lq_table, " ".join(lq_columns), lq_filter, limit)

            lq_columns = ["site"] + lq_columns
            for row in sites.live().query(lq):
                # Put result columns into a dict
                row_dict = dict(zip(lq_columns, row))

                # The plugin itself might add more info to the row
                # This is saved into an extra dict named options
                options = {}
                if plugin.get("match_url_tmpl_func"):
                    options["url"] = plugin["match_url_tmpl_func"](
                        used_filters, row_dict)

                data.append([plugin] + [options] + [row_dict])
            if len(data) >= limit:
                break

    for plugin in search_plugins:
        if "search_func" in plugin and plugin_matches_filters(
                plugin, used_filters):
            for row in plugin['search_func'](used_filters):
                row_options, row_data = row
                data.append((plugin, row_options, row_data))

    sites.live().set_prepend_site(False)

    # Apply the limit once again (search_funcs of plugins could have added some results)
    data = data[:limit]

    # Function to create a unqiue hashable key from a row
    def get_key(row):
        plugin, row_options, row_data = row
        name = row_data.get(get_row_name(row))
        return (row_data.get("site"), row_data.get("host_name"), name)

    # Remove duplicate rows. A set makes membership tests O(1) instead of
    # the former O(n) list scan; the redundant second "used_keys = []"
    # initialization was dropped as dead code.
    used_keys = set()
    new_data = []
    for row in data:
        row_key = get_key(row)
        if row_key not in used_keys:
            new_data.append(row)
            used_keys.add(row_key)
    data = new_data

    # Sort data if its not a host filter
    def sort_data(data):
        def sort_fctn(a, b):
            return cmp(get_key(a), get_key(b))

        data.sort(cmp=sort_fctn)  # Python 2 cmp-style in-place sort
        return data

    search_types = list(set(map(lambda x: x[0], used_filters)))
    if len(used_filters) > 1 and search_types != ["hosts"]:
        data = sort_data(data)

    return data
예제 #45
0
def may_see(host_name):
    """Return True if the current user may see the given host."""
    if config.may("general.see_all"):
        return True

    # Livestatus applies the user's permissions; a positive host count
    # means at least one visible host matches the name.
    lql = "GET hosts\nStats: state >= 0\nFilter: name = %s\n" % lqencode(host_name)
    return sites.live().query_value(lql) > 0
예제 #46
0
def search_livestatus(used_filters):
    """Run all quicksearch plugins against livestatus for the given filters.

    used_filters: list of (filter_type, value) pairs from the search field.
    Returns a deduplicated, optionally sorted list of
    (plugin, options, row_dict) entries, truncated to
    config.quicksearch_dropdown_limit.
    """
    limit = config.quicksearch_dropdown_limit

    # We need to know which plugin lead to finding a particular host, so it
    # is neccessary to make one query for each plugin - sorry. For example
    # for the case, that a host can be found via alias or name.
    data = []

    sites.live().set_prepend_site(True)
    for plugin in search_plugins:
        if "filter_func" not in plugin:
            continue

        if not plugin_matches_filters(plugin, used_filters):
            continue

        lq_filter = plugin["filter_func"](used_filters)
        if lq_filter:
            lq_table = plugin.get("lq_table", plugin.get("id"))
            lq_columns = plugin.get("lq_columns")
            lq = "GET %s\nCache: reload\nColumns: %s\n%sLimit: %d\n" % (
                lq_table,
                " ".join(lq_columns),
                lq_filter,
                limit,
            )
            # html.debug("<br>%s" % lq.replace("\n", "<br>"))

            lq_columns = ["site"] + lq_columns
            for row in sites.live().query(lq):
                # Put result columns into a dict
                row_dict = {}
                for idx, col in enumerate(row):
                    row_dict[lq_columns[idx]] = col

                # The plugin itself might add more info to the row
                # This is saved into an extra dict named options
                options = {}
                if plugin.get("match_url_tmpl_func"):
                    options["url"] = plugin["match_url_tmpl_func"](used_filters, row_dict)

                data.append([plugin] + [options] + [row_dict])
            if len(data) >= limit:
                break

    for plugin in search_plugins:
        if "search_func" in plugin and plugin_matches_filters(plugin, used_filters):
            for row in plugin["search_func"](used_filters):
                row_options, row_data = row
                data.append((plugin, row_options, row_data))

    sites.live().set_prepend_site(False)

    # Apply the limit once again (search_funcs of plugins could have added some results)
    data = data[:limit]

    # NOTE(review): this initialization is redundant -- used_keys is
    # re-initialized right below before its first use.
    used_keys = []

    # Function to create a unqiue hashable key from a row
    def get_key(row):
        plugin, row_options, row_data = row
        name = row_data.get(get_row_name(row))
        return (row_data.get("site"), row_data.get("host_name"), name)

    # Remove duplicate rows
    used_keys = []
    new_data = []
    for row in data:
        row_key = get_key(row)
        if row_key not in used_keys:
            new_data.append(row)
            used_keys.append(row_key)
    data = new_data

    # Sort data if its not a host filter
    def sort_data(data):
        sorted_data = data

        def sort_fctn(a, b):
            return cmp(get_key(a), get_key(b))

        # Python 2 cmp-style in-place sort; sorted_data aliases data.
        data.sort(cmp=sort_fctn)
        return sorted_data

    search_types = list(set(map(lambda x: x[0], used_filters)))
    if len(used_filters) > 1 and search_types != ["hosts"]:
        data = sort_data(data)

    return data
예제 #47
0
 def display(self):
     """Render a sorted dropdown of the unique values of self.query."""
     choices = [("", "")]
     for value in sites.live().query_column_unique(self.query):
         choices.append((value, value))
     html.sorted_select(self.name, choices)
예제 #48
0
 def display(self):
     """Render a sorted dropdown of the unique values of self.query.

     The leading ("", "") entry allows deselecting the filter.
     """
     selection = sites.live().query_column_unique(self.query)
     html.sorted_select(self.name, [("", "")] + [(x,x) for x in selection])
예제 #49
0
def get_status():
    """Return the Event Console status as a {column: value} dict."""
    rows = sites.live().query("GET eventconsolestatus")
    headers, values = rows[0], rows[1]
    return dict(zip(headers, values))
예제 #50
0
def all_groups(what):
    """Return (name, alias) pairs for all groups of the given kind.

    what: "host", "service" or "contact" -- selects the livestatus table.
    Falls back to the group name when the alias is empty.
    """
    query = "GET %sgroups\nCache: reload\nColumns: name alias\n" % what
    alias_by_name = dict(sites.live().query(query))
    return [(name, alias_by_name[name] or name) for name in alias_by_name.keys()]
예제 #51
0
def acknowledge_logfile(site, host_name, int_filename, display_name):
    """Send the MK_LOGWATCH_ACKNOWLEDGE command for one host's logfile.

    Raises MKAuthException when the user may not see the host.
    display_name is unused here; kept for interface compatibility.
    """
    if not may_see(site, host_name):
        raise MKAuthException(_('Permission denied.'))

    timestamp = int(time.time())
    sites.live().command(
        "[%d] MK_LOGWATCH_ACKNOWLEDGE;%s;%s" % (timestamp, host_name, int_filename),
        site)
예제 #52
0
def acknowledge_logfile(site, host_name, int_filename, display_name):
    """Send the MK_LOGWATCH_ACKNOWLEDGE command for one host's logfile.

    Raises MKAuthException when the user may not see the host.
    display_name is unused here; kept for interface compatibility.
    """
    if not may_see(site, host_name):
        raise MKAuthException(_('Permission denied.'))

    command = "MK_LOGWATCH_ACKNOWLEDGE;%s;%s" % (host_name, int_filename)
    sites.live().command("[%d] %s" % (int(time.time()), command), site)
예제 #53
0
def may_see(site, host_name):
    """Return True if the current user may see the given host.

    site is unused here; kept for interface compatibility with callers.
    """
    if config.user.may("general.see_all"):
        return True

    # The livestatus connection is set up with AuthUser, so the count only
    # includes hosts visible to the current user.
    lql = "GET hosts\nStats: state >= 0\nFilter: name = %s\n" % lqencode(host_name)
    return sites.live().query_value(lql) > 0
예제 #54
0
def all_groups(what):
    """Return (name, alias) pairs for all groups of the given kind.

    what: "host", "service" or "contact" -- selects the livestatus table.
    Falls back to the group name when the alias is empty.
    """
    groups = dict(sites.live().query("GET %sgroups\nCache: reload\nColumns: name alias\n" % what))
    return [ (name, groups[name] or name) for name in groups.keys() ]
예제 #55
0
def get_status():
    """Return the Event Console status as a {column: value} dict.

    Row 0 of the response carries the column names, row 1 the values.
    """
    response = sites.live().query("GET eventconsolestatus")
    return dict(zip(response[0], response[1]))
예제 #56
0
def pending_hosts():
    """Return (site, host_name) pairs for all hosts that were never checked."""
    query = "GET hosts\nFilter: has_been_checked = 0\nColumns: name"
    sites.live().set_prepend_site(True)
    result = sites.live().query(query)
    sites.live().set_prepend_site(False)
    return result
예제 #57
0
def render_statistics(pie_id, what, table, filter, dashlet):
    """Render a host/service statistics dashlet: stats table plus canvas pie.

    pie_id: DOM id prefix for the canvas element.
    what: 'hosts' or 'services' -- selects the livestatus table.
    table: list of (name, color, viewurl, stats_header) entries, one per
        pie segment / table row.
    filter: pre-built livestatus filter headers, extended with the
        dashlet's context filters.
    dashlet: the dashlet definition dict (provides 'context').
    """
    pie_diameter     = 130
    pie_left_aspect  = 0.5
    pie_right_aspect = 0.8

    if what == 'hosts':
        info = 'host'
        infos = [ info ]
    else:
        info = 'service'
        infos = [ 'host', 'service' ]
    # Apply the dashlet's visual context filters (except the site filter,
    # which is handled separately via set_only_sites below).
    use_filters = visuals.filters_of_visual(dashlet, infos)
    for filt in use_filters:
        if filt.available() and not isinstance(filt, visuals.FilterSite):
            filter += filt.filter(info)

    # Assemble one query containing all Stats: headers from the table.
    query = "GET %s\n" % what
    for entry in table:
        query += entry[3]
    query += filter

    site = dashlet['context'].get('siteopt', {}).get('site')
    if site:
        sites.live().set_only_sites([site])
        result = sites.live().query_row(query)
        sites.live().set_only_sites()
    else:
        result = sites.live().query_summed_stats(query)

    pies = zip(table, result)
    total = sum([x[1] for x in pies])

    html.write("<div class=stats>")
    html.write('<canvas class=pie width=%d height=%d id="%s_stats" style="float: left"></canvas>' %
            (pie_diameter, pie_diameter, pie_id))
    html.write('<img src="images/globe.png" class="globe">')

    html.write('<table class="hoststats%s" style="float:left">' % (
        len(pies) > 1 and " narrow" or ""))

    # Pad the table to at least 6 rows, then append the total row.
    table_entries = pies
    while len(table_entries) < 6:
        table_entries = table_entries + [ (("", "#95BBCD", "", ""), "&nbsp;") ]
    table_entries.append(((_("Total"), "", "all%s" % what, ""), total))

    for (name, color, viewurl, query), count in table_entries:
        url = "view.py?view_name=" + viewurl + "&filled_in=filter&search=1"
        for filter_name, url_params in dashlet['context'].items():
            if filter_name == "wato_folder" and html.has_var("wato_folder"):
                url += "&wato_folder=" + html.var("wato_folder")

            elif filter_name == "svcstate":
                # The svcstate filter URL vars are controlled by dashlet
                continue

            else:
                url += '&' + html.urlencode_vars(url_params.items())

        html.write('<tr><th><a href="%s">%s</a></th>' % (url, name))
        style = ''
        if color:
            style = ' style="background-color: %s"' % color
        html.write('<td class=color%s>'
                   '</td><td><a href="%s">%s</a></td></tr>' % (style, url, count))

    html.write("</table>")

    r = 0.0
    pie_parts = []
    if total > 0:
        # Count number of non-empty classes
        num_nonzero = 0
        for info, value in pies:
            if value > 0:
                num_nonzero += 1

        # Each non-zero class gets at least a view pixels of visible thickness.
        # We reserve that space right now. All computations are done in percent
        # of the radius.
        separator = 0.02                                    # 3% of radius
        remaining_separatorspace = num_nonzero * separator  # space for separators
        remaining_radius = 1 - remaining_separatorspace     # remaining space
        remaining_part = 1.0 # keep track of remaining part, 1.0 = 100%

        # Loop over classes, begin with most outer sphere. Inner spheres show
        # worse states and appear larger to the user (which is the reason we
        # are doing all this stuff in the first place)
        # NOTE(review): [::1] is a plain copy, not a reversal -- the comment
        # above suggests [::-1] may have been intended; confirm before changing.
        for (name, color, viewurl, q), value in pies[::1]:
            if value > 0 and remaining_part > 0: # skip empty classes

                # compute radius of this sphere *including all inner spheres!* The first
                # sphere always gets a radius of 1.0, of course.
                radius = remaining_separatorspace + remaining_radius * (remaining_part ** (1/3.0))
                pie_parts.append('chart_pie("%s", %f, %f, %r, true);' % (pie_id, pie_right_aspect, radius, color))
                pie_parts.append('chart_pie("%s", %f, %f, %r, false);' % (pie_id, pie_left_aspect, radius, color))

                # compute relative part of this class
                part = float(value) / total # ranges from 0 to 1
                remaining_part           -= part
                remaining_separatorspace -= separator


    html.write("</div>")
    html.javascript("""
function chart_pie(pie_id, x_scale, radius, color, right_side) {
    var context = document.getElementById(pie_id + "_stats").getContext('2d');
    if (!context)
        return;
    var pie_x = %(x)f;
    var pie_y = %(y)f;
    var pie_d = %(d)f;
    context.fillStyle = color;
    context.save();
    context.translate(pie_x, pie_y);
    context.scale(x_scale, 1);
    context.beginPath();
    if(right_side)
        context.arc(0, 0, (pie_d / 2) * radius, 1.5 * Math.PI, 0.5 * Math.PI, false);
    else
        context.arc(0, 0, (pie_d / 2) * radius, 0.5 * Math.PI, 1.5 * Math.PI, false);
    context.closePath();
    context.fill();
    context.restore();
    context = null;
}


if (has_canvas_support()) {
    %(p)s
}
""" % { "x" : pie_diameter / 2, "y": pie_diameter/2, "d" : pie_diameter, 'p': '\n'.join(pie_parts) })
def get_tactical_overview_data2(extra_filter_headers):
    """Collect the numbers for the tactical overview snapin.

    extra_filter_headers: raw livestatus filter headers appended to the
    host and (first) service query.

    Returns a 5-tuple (hstdata, svcdata, svc2data, notdata, event_data),
    each a list of summed Stats: values, or five Nones when livestatus
    reports no data at all.
    """
    configured_staleness_threshold = config.staleness_threshold

    # Host stats: total / problems (not in downtime) /
    # unhandled problems / stale hosts (not in downtime).
    host_query = \
        "GET hosts\n" \
        "Stats: state >= 0\n" \
        "Stats: state > 0\n" \
        "Stats: scheduled_downtime_depth = 0\n" \
        "StatsAnd: 2\n" \
        "Stats: state > 0\n" \
        "Stats: scheduled_downtime_depth = 0\n" \
        "Stats: acknowledged = 0\n" \
        "StatsAnd: 3\n" \
        "Stats: host_staleness >= %s\n" % configured_staleness_threshold + \
        "Stats: host_scheduled_downtime_depth = 0\n" \
        "StatsAnd: 2\n" \
        "Filter: custom_variable_names < _REALNAME\n" + \
        extra_filter_headers

    # Service stats with the same four columns; only services whose host
    # is UP and not in downtime are counted as problems.
    service_query = \
        "GET services\n" \
        "Stats: state >= 0\n" \
        "Stats: state > 0\n" \
        "Stats: scheduled_downtime_depth = 0\n" \
        "Stats: host_scheduled_downtime_depth = 0\n" \
        "Stats: host_state = 0\n" \
        "StatsAnd: 4\n" \
        "Stats: state > 0\n" \
        "Stats: scheduled_downtime_depth = 0\n" \
        "Stats: host_scheduled_downtime_depth = 0\n" \
        "Stats: acknowledged = 0\n" \
        "Stats: host_state = 0\n" \
        "StatsAnd: 5\n" \
        "Stats: service_staleness >= %s\n" % configured_staleness_threshold + \
        "Stats: host_scheduled_downtime_depth = 0\n" \
        "Stats: service_scheduled_downtime_depth = 0\n" \
        "Stats: host_state = 0\n" \
        "StatsAnd: 4\n" \
        "Filter: host_custom_variable_names < _REALNAME\n" + \
        extra_filter_headers

    # NOTE(review): unlike service_query, this variant does not append
    # extra_filter_headers and additionally restricts to services with
    # notifications enabled -- confirm this asymmetry is intentional.
    service_query_2 = \
        "GET services\n" \
        "Stats: state >= 0\n" \
        "Stats: state > 0\n" \
        "Stats: scheduled_downtime_depth = 0\n" \
        "Stats: host_scheduled_downtime_depth = 0\n" \
        "Stats: host_state = 0\n" \
        "StatsAnd: 4\n" \
        "Stats: state > 0\n" \
        "Stats: scheduled_downtime_depth = 0\n" \
        "Stats: host_scheduled_downtime_depth = 0\n" \
        "Stats: acknowledged = 0\n" \
        "Stats: host_state = 0\n" \
        "StatsAnd: 5\n" \
        "Stats: service_staleness >= %s\n" % configured_staleness_threshold + \
        "Stats: host_scheduled_downtime_depth = 0\n" \
        "Stats: service_scheduled_downtime_depth = 0\n" \
        "Stats: host_state = 0\n" \
        "StatsAnd: 4\n" \
        "Filter: host_custom_variable_names < _REALNAME\n" \
        "Filter: notifications_enabled = 1\n"

    event_query = (
        # "Events" column
        "GET eventconsoleevents\n"
        "Stats: event_phase = open\n"
        "Stats: event_phase = ack\n"
        "StatsOr: 2\n"
        # "Problems" column
        "Stats: event_phase = open\n"
        "Stats: event_phase = ack\n"
        "StatsOr: 2\n"
        "Stats: event_state != 0\n"
        "StatsAnd: 2\n"
        # "Unhandled" column
        "Stats: event_phase = open\n"
        "Stats: event_state != 0\n"
        "StatsAnd: 2\n"
    )

    try:
        hstdata = sites.live().query_summed_stats(host_query)
        svcdata = sites.live().query_summed_stats(service_query)
        svc2data = sites.live().query_summed_stats(service_query_2)
        notdata = notifications.load_failed_notifications(
                        after=notifications.acknowledged_time(),
                        stat_only=True,
                        extra_headers=extra_filter_headers)

        # Event Console queries need the "ec" auth domain; always switch
        # back to "read" afterwards. A missing EC simply yields zeros.
        try:
            sites.live().set_auth_domain("ec")
            event_data = sites.live().query_summed_stats(event_query)
        except livestatus.MKLivestatusNotFoundError:
            event_data = [0, 0, 0]
        finally:
            sites.live().set_auth_domain("read")

    except livestatus.MKLivestatusNotFoundError:
        return None, None, None, None, None

    return hstdata, svcdata, svc2data, notdata, event_data