Example #1
def main(sys_argv=None):

    args = parse_arguments(sys_argv or sys.argv[1:])

    socket.setdefaulttimeout(args.timeout)

    # The dynamic library detection of Snap7Library using ctypes.util.find_library does not work for
    # some reason. Load the library from our standard path.
    Snap7Library(lib_location="%s/lib/libsnap7.so" % os.environ["OMD_ROOT"])

    client = snap7.client.Client()

    for device in args.hostspec:

        hostname = device["host_name"]

        try:
            client.connect(device["host_address"], device["rack"],
                           device["slot"], device["port"])
        except Snap7Exception as e:
            print(_snap7error(hostname, "Error connecting to device", e),
                  file=sys.stderr)
            continue

        try:
            cpu_state = client.get_cpu_state()
        except Snap7Exception as e:
            cpu_state = None
            print(_snap7error(hostname, "Error reading device CPU state", e),
                  file=sys.stderr)

        parsed_area_values = []
        for (area_name,
             db_number), iter_values in _group_device_values(device):
            values = list(iter_values)
            start_address, end_address = _addresses_from_area_values(values)
            try:
                area_value = client.read_area(
                    _area_name_to_area_id(area_name),
                    db_number,
                    start_address,
                    size=end_address - start_address,
                )
            except Snap7Exception as e:
                print(_snap7error(hostname, "Error reading data area", e),
                      file=sys.stderr)
                continue

            parsed_area_values.extend(
                _cast_values(values, start_address, area_value))

        with SectionWriter("siemens_plc_cpu_state", None) as writer:
            if cpu_state is not None:
                writer.append(cpu_state)

        with SectionWriter("siemens_plc", None) as writer:
            for values in parsed_area_values:
                writer.append("%s %s %s %s" % (hostname, *values))
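All of these examples revolve around the SectionWriter context manager from cmk.special_agents.utils.agent_common. As a mental model only, here is a minimal sketch of what it presumably does (the default separator and the exact ":sep(N)" header suffix are assumptions, not the real implementation):

import json


class SectionWriterSketch:
    """Rough approximation of SectionWriter, for illustration only."""

    def __init__(self, section_name, separator="\0"):
        # Assumption: a truthy separator becomes a ":sep(<ord>)" suffix in the
        # header; None suppresses it (cf. SectionWriter("uptime", separator=None)
        # in the Proxmox examples below).
        suffix = f":sep({ord(separator)})" if separator else ""
        self._lines = [f"<<<{section_name}{suffix}>>>"]

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        # Flush the buffered section to stdout when the with-block ends.
        print("\n".join(self._lines))
        return False

    def append(self, data):
        self._lines.append(str(data))

    def append_json(self, data):
        self._lines.append(json.dumps(data))

Under that model, the last loop of Example #1 prints a "<<<siemens_plc>>>" header followed by one "hostname area ..." line per parsed value.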
Example #2
def agent_prism_main(args: Args) -> None:
    """Establish a connection to a Prism server and process containers, alerts, clusters and
    storage_pools"""
    LOGGING.info("setup HTTPS connection..")
    requester = HTTPSAuthRequester(
        args.server,
        args.port,
        "PrismGateway/services/rest/v1",
        args.username,
        args.password,
    )

    LOGGING.info("fetch and write container info..")
    output_containers(requester)

    LOGGING.info("fetch and write alerts..")
    with SectionWriter("prism_alerts") as writer:
        writer.append_json(output_alerts(requester))

    LOGGING.info("fetch and write cluster info..")
    output_cluster(requester)

    LOGGING.info("fetch and write storage_pools..")
    output_storage_pools(requester)

    LOGGING.info("all done. bye.")
Example #3
def write_performance_section(
    section_name: SectionName,
    section_model: Type[BaseModel],
    section_containers: Sequence[BaseModel],
) -> None:
    with SectionWriter(f"k8s_live_{section_name}_v1") as writer:
        writer.append(section_model(containers=section_containers).json())
Example #4
def _piggyback_serializer(section: PiggyBackSection):
    with ConditionalPiggybackSection(section.name):
        with SectionWriter("labels") as w:
            w.append(json.dumps(section.labels))
        for s in section.sections:
            new_s = ResultSection(f"{section.service_name}_{s.name}",
                                  s.results)
            _result_serializer(new_s)
Example #5
def _write_sections(
        sections: Mapping[str, Callable[[], Optional[JsonProtocol]]]) -> None:
    for section_name, section_call in sections.items():
        with SectionWriter(section_name) as writer:
            section_output = section_call()
            if not section_output:
                continue
            writer.append(section_output.json())
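A hypothetical call site for _write_sections (the Payload class and both fetchers are invented for illustration; judging from the usage above, a JsonProtocol object only needs a .json() method returning a string):

import json
from typing import Optional


class Payload:
    """Hypothetical stand-in for an object satisfying JsonProtocol."""

    def __init__(self, data: dict) -> None:
        self._data = data

    def json(self) -> str:
        return json.dumps(self._data)


def _fetch_status() -> Optional[Payload]:
    return Payload({"state": "ok"})


def _fetch_jobs() -> Optional[Payload]:
    return None  # nothing to report this time


_write_sections({
    "example_status": _fetch_status,
    "example_jobs": _fetch_jobs,
})

Note that the writer is entered before the callable runs, so a fetcher that returns nothing still emits an (empty) section header, presumably so the receiving side can tell "no data" from "section never ran".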
Example #6
def agent_mobileiron_main(args: Args) -> None:
    """Fetches and writes selected information formatted as agent output to stdout.
    Standard out with sections and piggyback example:
    <<<<entityName>>>>
    <<<mobileiron_section>>>
    {"...": ...}
    <<<<entityName>>>>
    <<<mobileiron_source_host>>>
    {"...": ...}
    <<<<>>>>
    """

    LOGGER.info("Fetch general device information...")

    if args.debug:
        LOGGER.debug("Initialize Mobileiron API")

    with MobileironAPI(
            args.hostname,
            args.port,
            auth=(args.username, args.password),
            verify=not args.no_cert_check,
            proxies=_proxy_address(
                args.proxy_host,
                args.proxy_port,
                args.proxy_user,
                args.proxy_password,
            ) if args.proxy_host else None,
    ) as mobileiron_api:

        all_devices = mobileiron_api.get_all_devices(partitions=args.partition)

    if args.debug:
        LOGGER.debug("Received the following devices: %s", all_devices)

    LOGGER.info("Write agent output..")
    for device in all_devices:
        if "total_count" in all_devices[device]:
            with ConditionalPiggybackSection(device), SectionWriter(
                    "mobileiron_source_host") as writer:
                writer.append_json(all_devices[device])
        else:
            with ConditionalPiggybackSection(device), SectionWriter(
                    "mobileiron_section") as writer:
                writer.append_json(all_devices[device])
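The docstring of Example #6 shows the framing that ConditionalPiggybackSection adds around ordinary sections. A minimal sketch of that behavior (assumed, not the real implementation: the piggyback markers are printed on entry and exit, and skipped for a falsy host name, which is what makes it "conditional"; compare piggyback_host = None in Examples #15 and #16):

from contextlib import contextmanager


@contextmanager
def conditional_piggyback_section_sketch(hostname):
    # Assumption: wrap the enclosed sections in piggyback markers only when a
    # host name is given; otherwise the sections count for the current host.
    if hostname:
        print(f"<<<<{hostname}>>>>")
    try:
        yield
    finally:
        if hostname:
            print("<<<<>>>>")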
Example #7
def alertmanager_rules_section(
    api_client: AlertmanagerAPI,
    config: Dict[str, Any],
) -> None:
    rule_groups = retrieve_rule_data(api_client)
    if not rule_groups.get("groups"):
        return
    parsed_data = parse_rule_data(rule_groups["groups"], config["ignore_alerts"])
    with ConditionalPiggybackSection(config["hostname"]):
        with SectionWriter("alertmanager") as writer:
            writer.append_json(parsed_data)
Example #8
def _monitors_section(
    datadog_api: DatadogAPI,
    args: Args,
) -> None:
    LOGGER.debug("Querying monitors")
    with SectionWriter("datadog_monitors") as writer:
        for monitor in MonitorsQuerier(datadog_api).query_monitors(
            args.monitor_tags,
            args.monitor_monitor_tags,
        ):
            writer.append_json(monitor)
Example #9
def agent_mqtt_main(args: Args) -> None:
    try:
        received = receive_from_mqtt(args)
    except RuntimeError as e:
        if args.debug:
            raise
        print(str(e), file=sys.stderr)
        sys.exit(1)

    with SectionWriter("mqtt_statistics") as writer:
        writer.append_json({args.instance_id: received.topics})
    sys.exit(0)
Example #10
def _events_section(datadog_api: DatadogAPI, args: Args) -> None:
    LOGGER.debug("Querying events")
    events = list(EventsQuerier(
        datadog_api,
        args.hostname,
    ).query_events(args.event_tags))
    _forward_events_to_ec(
        events,
        args.event_tags_show,
        args.event_syslog_facility,
        args.event_syslog_priority,
        args.event_service_level,
        args.event_add_text,
    )
    with SectionWriter("datadog_events") as writer:
        writer.append(len(events))
Example #11
def write_section(
        all_files: Generator[Tuple[str, List[File]], None, None]) -> None:
    with SectionWriter("fileinfo", separator="|") as writer:
        now = datetime.utcnow().replace(tzinfo=timezone.utc)
        writer.append(int(datetime.timestamp(now)))
        writer.append("[[[header]]]")
        writer.append("name|status|size|time")
        writer.append("[[[content]]]")
        for pattern, shared_files in all_files:
            if not shared_files:
                writer.append(f"{pattern}|missing")
                continue

            for shared_file in shared_files:
                file_obj = shared_file.file
                age = int(file_obj.last_write_time)
                file_info = f"{shared_file.path}|ok|{file_obj.file_size}|{age}"
                writer.append(file_info)
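For orientation, the section emitted by Example #11 might look like this (paths, sizes and timestamps are invented; the separator="|" argument is assumed to appear as a :sep(124) suffix, 124 being ord("|")):

<<<fileinfo:sep(124)>>>
1700000000
[[[header]]]
name|status|size|time
[[[content]]]
/shares/reports/app.log|ok|2048|1699990000
/shares/reports/*.bak|missing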
Example #12
def agent_storeonce4x_main(args: Args) -> None:
    if not args.verify_ssl:
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

    oauth_session = StoreOnceOauth2Session(
        args.host,
        args.port,
        args.user,
        args.password,
        args.verify_ssl,
    )

    for section_basename, function in SECTIONS:
        with SectionWriter("storeonce4x_%s" % section_basename) as writer:
            try:
                writer.append_json(function(oauth_session))
            except Exception as exc:
                if args.debug:
                    raise
                LOGGER.error("Caught exception: %r", exc)
Example #13
def _result_serializer(section: ResultSection):
    with SectionWriter(f"gcp_service_{section.name}") as w:
        for r in section.results:
            w.append(Result.serialize(r))
Example #14
def _asset_serializer(section: AssetSection):
    with SectionWriter("gcp_assets") as w:
        w.append(json.dumps(dict(project=section.project)))
        for a in section.assets:
            w.append(Asset.serialize(a))
Example #15
def agent_proxmox_ve_main(args: Args) -> None:
    """Fetches and writes selected information formatted as agent output to stdout"""
    with ProxmoxVeAPI(
            host=args.hostname,
            port=args.port,
            credentials={
                k: getattr(args, k)
                for k in ("username", "password") if getattr(args, k)
            },
            timeout=args.timeout,
            verify_ssl=not args.no_cert_check,
    ) as session:
        LOGGER.info("Fetch general cluster and node information..")
        data = session.get_tree({
            "cluster": {
                "backup": [],
                "resources": [],
            },
            "nodes": [{
                "{node}": {
                    "subscription": {},
                    # for now just get basic task data - we'll read the logs later
                    "tasks": [],
                    "qemu": [{
                        "{vmid}": {
                            "snapshot": [],
                        }
                    }],
                    "lxc": [{
                        "{vmid}": {
                            "snapshot": [],
                        }
                    }],
                    "version": {},
                    "time": {},
                },
            }],
            "version": {},
        })

        LOGGER.info("Fetch and process backup logs..")
        logged_backup_data = fetch_backup_data(args, session, data["nodes"])

    all_vms = {
        str(entry["vmid"]): entry
        for entry in data["cluster"]["resources"]
        if entry["type"] in ("lxc", "qemu")
    }

    backup_data = {
        # generate a list of all VM IDs - both lxc and qemu
        "vmids":
        sorted(list(all_vms.keys())),
        # look up scheduled backups and extract assigned VMIDs
        "scheduled_vmids":
        sorted(
            list(
                set(vmid  #
                    for backup in data["cluster"]["backup"]
                    if "vmid" in backup and backup["enabled"] == "1"
                    for vmid in backup["vmid"].split(",")))),
        # add data of actually logged VMs
        "logged_vmids":
        logged_backup_data,
    }

    node_timezones = {}  # Timezones on nodes can be potentially different
    snapshot_data = {}

    for node in data["nodes"]:
        node_timezones[node["node"]] = node["time"]["timezone"]
        # only lxc and qemu can have snapshots
        for vm in node.get("lxc", []) + node.get("qemu", []):
            snapshot_data[str(vm["vmid"])] = {
                "snaptimes":
                [x["snaptime"] for x in vm["snapshot"] if "snaptime" in x],
            }

    def date_to_utc(naive_string: str, tz: str) -> str:
        """
        Adds timezone information to a date string.
        Returns a timezone-aware string
        """
        local_tz = pytz.timezone(tz)
        timezone_unaware = datetime.strptime(naive_string, "%Y-%m-%d %H:%M:%S")
        timezone_aware = local_tz.localize(timezone_unaware)
        return timezone_aware.strftime("%Y-%m-%d %H:%M:%S%z")

    # overwrite all the start time strings with timezone-aware start strings
    for vmid in logged_backup_data:
        logged_backup_data[vmid]["started_time"] = date_to_utc(
            logged_backup_data[vmid]["started_time"],
            node_timezones[all_vms[vmid]["node"]])

    LOGGER.info("all VMs:          %r", backup_data["vmids"])
    LOGGER.info("expected backups: %r", backup_data["scheduled_vmids"])
    LOGGER.info("actual backups:   %r",
                sorted(list(logged_backup_data.keys())))
    LOGGER.info("snaptimes:        %r", snapshot_data)

    LOGGER.info("Write agent output..")
    for node in data["nodes"]:
        assert node["type"] == "node"
        piggyback_host = None if args.hostname.startswith(
            node["node"] + ".") else node["node"]
        with ConditionalPiggybackSection(piggyback_host):
            with SectionWriter("proxmox_ve_node_info") as writer:
                writer.append_json({
                    "status":
                    node["status"],
                    "lxc": [
                        vmid for vmid in all_vms
                        if all_vms[vmid]["type"] == "lxc"
                    ],
                    "qemu": [
                        vmid for vmid in all_vms
                        if all_vms[vmid]["type"] == "qemu"
                    ],
                    "proxmox_ve_version":
                    node["version"],
                    "time_info":
                    node["time"],
                    "subscription": {
                        key: value
                        for key, value in node["subscription"].items()
                        if key in {
                            "status",
                            "checktime",
                            "key",
                            "level",
                            "nextduedate",
                            "productname",
                            "regdate",
                        }
                    },
                })
            with SectionWriter("proxmox_ve_mem_usage") as writer:
                writer.append_json({
                    "mem": node["mem"],
                    "max_mem": node["maxmem"],
                })
            with SectionWriter("uptime", separator=None) as writer:
                writer.append(node["uptime"])

    for vmid, vm in all_vms.items():
        with ConditionalPiggybackSection(vm["name"]):
            with SectionWriter("proxmox_ve_vm_info") as writer:
                writer.append_json({
                    "vmid": vmid,
                    "node": vm["node"],
                    "type": vm["type"],
                    "status": vm["status"],
                    "name": vm["name"],
                })
            if vm["type"] != "qemu":
                with SectionWriter("proxmox_ve_disk_usage") as writer:
                    writer.append_json({
                        "disk": vm["disk"],
                        "max_disk": vm["maxdisk"],
                    })
            with SectionWriter("proxmox_ve_mem_usage") as writer:
                writer.append_json({
                    "mem": vm["mem"],
                    "max_mem": vm["maxmem"],
                })
            with SectionWriter("proxmox_ve_vm_backup_status") as writer:
                writer.append_json({
                    # todo: info about erroneous backups
                    "last_backup": logged_backup_data.get(vmid),
                })
            with SectionWriter("proxmox_ve_vm_snapshot_age") as writer:
                writer.append_json(snapshot_data.get(vmid))
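The date_to_utc helper from Example #15 runs standalone. Note that, despite its name, it does not convert anything to UTC: pytz's localize keeps the wall-clock time and merely attaches the node's zone as an offset, exactly as the docstring says. A quick check:

from datetime import datetime

import pytz


def date_to_utc(naive_string: str, tz: str) -> str:
    local_tz = pytz.timezone(tz)
    timezone_unaware = datetime.strptime(naive_string, "%Y-%m-%d %H:%M:%S")
    timezone_aware = local_tz.localize(timezone_unaware)
    return timezone_aware.strftime("%Y-%m-%d %H:%M:%S%z")


print(date_to_utc("2023-06-01 12:00:00", "Europe/Berlin"))
# -> 2023-06-01 12:00:00+0200 (CEST in June)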
Example #16
def agent_proxmox_ve_main(args: Args) -> None:
    """Fetches and writes selected information formatted as agent output to stdout"""
    with ProxmoxVeAPI(
            host=args.hostname,
            port=args.port,
            credentials={
                k: getattr(args, k)
                for k in {"username", "password"} if getattr(args, k)
            },
            timeout=args.timeout,
            verify_ssl=not args.no_cert_check,
    ) as session:
        LOGGER.info("Fetch general cluster and node information..")
        data = session.get_tree({
            "cluster": {
                "backup": [],
                "resources": [],
            },
            "nodes": [{
                "{node}": {
                    "subscription": {},
                    # for now just get basic task data - we'll read the logs later
                    "tasks": [],
                    "version": {},
                },
            }],
            "version": {},
        })

        LOGGER.info("Fetch and process backup logs..")
        logged_backup_data = fetch_backup_data(args, session, data["nodes"])

    all_vms = {
        str(entry["vmid"]): entry
        for entry in data["cluster"]["resources"]
        if entry["type"] in ("lxc", "qemu")
    }

    backup_data = {
        # generate a list of all VM IDs - both lxc and qemu
        "vmids":
        sorted(list(all_vms.keys())),
        # look up scheduled backups and extract assigned VMIDs
        "scheduled_vmids":
        sorted(
            list(
                set(vmid  #
                    for backup in data["cluster"]["backup"]
                    if "vmid" in backup and backup["enabled"] == "1"
                    for vmid in backup["vmid"].split(",")))),
        # add data of actually logged VMs
        "logged_vmids":
        logged_backup_data,
    }

    LOGGER.info("all VMs:          %r", backup_data["vmids"])
    LOGGER.info("expected backups: %r", backup_data["scheduled_vmids"])
    LOGGER.info("actual backups:   %r",
                sorted(list(logged_backup_data.keys())))

    LOGGER.info("Write agent output..")
    for node in data["nodes"]:
        assert node["type"] == "node"
        piggyback_host = None if args.hostname.startswith(
            node["node"] + ".") else node["node"]
        with ConditionalPiggybackSection(piggyback_host):
            with SectionWriter("proxmox_ve_node_info") as writer:
                writer.append_json({
                    "status":
                    node["status"],
                    "lxc": [
                        vmid for vmid in all_vms
                        if all_vms[vmid]["type"] == "lxc"
                    ],
                    "qemu": [
                        vmid for vmid in all_vms
                        if all_vms[vmid]["type"] == "qemu"
                    ],
                    "proxmox_ve_version":
                    node["version"],
                    "subscription": {
                        key: value
                        for key, value in node["subscription"].items()
                        if key in {
                            "status",
                            "checktime",
                            "key",
                            "level",
                            "nextduedate",
                            "productname",
                            "regdate",
                        }
                    },
                })
            with SectionWriter("proxmox_ve_mem_usage") as writer:
                writer.append_json({
                    "mem": node["mem"],
                    "max_mem": node["maxmem"],
                })
            with SectionWriter("uptime", separator=None) as writer:
                writer.append(node["uptime"])

    for vmid, vm in all_vms.items():
        with ConditionalPiggybackSection(vm["name"]):
            with SectionWriter("proxmox_ve_vm_info") as writer:
                writer.append_json({
                    "vmid": vmid,
                    "node": vm["node"],
                    "type": vm["type"],
                    "status": vm["status"],
                    "name": vm["name"],
                })
            if vm["type"] != "qemu":
                with SectionWriter("proxmox_ve_disk_usage") as writer:
                    writer.append_json({
                        "disk": vm["disk"],
                        "max_disk": vm["maxdisk"],
                    })
            with SectionWriter("proxmox_ve_mem_usage") as writer:
                writer.append_json({
                    "mem": vm["mem"],
                    "max_mem": vm["maxmem"],
                })
            with SectionWriter("proxmox_ve_vm_backup_status") as writer:
                writer.append_json({
                    # todo: info about erroneous backups
                    "last_backup": logged_backup_data.get(vmid),
                })
Example #17
def run_metrics(client: Client, services: Iterable[GCPService]) -> None:
    for s in services:
        with SectionWriter(f"gcp_service_{s.name.lower()}") as w:
            for result in time_series(client, s):
                w.append(Result.serialize(result))
Example #18
def run_assets(client: Client) -> None:
    with SectionWriter("gcp_assets") as w:
        w.append(json.dumps(dict(project=client.project)))
        for asset in gather_assets(client):
            w.append(Asset.serialize(asset))
Example #19
def sections(self) -> None:
    # with ConditionalPiggybackSection(self.name):
    with SectionWriter("k8s_pod_resources") as writer:
        writer.append(self.pod_resources().json())
Example #20
def run(client: Client, s: GCPService) -> None:
    with SectionWriter(f"gcp_service_{s.name.lower()}") as w:
        for result in time_series(client, s):
            w.append(Result.serialize(result))