Beispiel #1
0
def main():
    """CLI entry point: log MESSAGE at level LEVEL with the metwork logger.

    Parses --application-name/-a (logger name, default "default"), a
    positional LEVEL (one of ERROR/CRITICAL/WARNING/INFO/DEBUG) and a
    positional MESSAGE, then dispatches to the matching logger method.
    """
    # fixed: trailing space added so the two adjacent string literals do
    # not fuse into "metworklogging"
    parser = argparse.ArgumentParser("log a message with standard metwork "
                                     "logging system")
    parser.add_argument('--application-name',
                        '-a',
                        action="store",
                        default="default",
                        help="application name")
    parser.add_argument(
        'LEVEL',
        action='store',
        help="Niveau de log",
        choices=['ERROR', 'CRITICAL', 'WARNING', 'INFO', 'DEBUG'])
    parser.add_argument('MESSAGE', action='store', help="message to log")
    options = parser.parse_args()

    logger = getLogger(options.application_name)
    if options.LEVEL == 'DEBUG':
        logger.debug(options.MESSAGE)
    elif options.LEVEL == 'INFO':
        logger.info(options.MESSAGE)
    elif options.LEVEL == 'WARNING':
        logger.warning(options.MESSAGE)
    elif options.LEVEL == 'CRITICAL':
        logger.critical(options.MESSAGE)
    elif options.LEVEL == 'ERROR':
        logger.error(options.MESSAGE)
    else:
        # unreachable thanks to argparse "choices", kept as a safety net.
        # fixed: "%s" was never interpolated (a tuple was passed to
        # Exception instead of a formatted string)
        raise Exception("Bad message level: %s" % options.LEVEL)
Beispiel #2
0
def main():
    """CLI entry point: log MESSAGE at level LEVEL with the metwork logger.

    Parses --application-name/-a (logger name, default "default"), a
    positional LEVEL (one of ERROR/CRITICAL/WARNING/INFO/DEBUG) and a
    positional MESSAGE, then dispatches to the matching logger method.
    """
    # fixed: trailing space added so the two adjacent string literals do
    # not fuse into "metworklogging"
    parser = argparse.ArgumentParser("log a message with standard metwork "
                                     "logging system")
    parser.add_argument('--application-name', '-a', action="store",
                        default="default", help="application name")
    parser.add_argument('LEVEL', action='store',
                        help="Niveau de log",
                        choices=['ERROR', 'CRITICAL', 'WARNING', 'INFO',
                                 'DEBUG'])
    parser.add_argument('MESSAGE', action='store', help="message to log")
    options = parser.parse_args()

    logger = getLogger(options.application_name)
    if options.LEVEL == 'DEBUG':
        logger.debug(options.MESSAGE)
    elif options.LEVEL == 'INFO':
        logger.info(options.MESSAGE)
    elif options.LEVEL == 'WARNING':
        logger.warning(options.MESSAGE)
    elif options.LEVEL == 'CRITICAL':
        logger.critical(options.MESSAGE)
    elif options.LEVEL == 'ERROR':
        logger.error(options.MESSAGE)
    else:
        # unreachable thanks to argparse "choices", kept as a safety net.
        # fixed: "%s" was never interpolated (a tuple was passed to
        # Exception instead of a formatted string)
        raise Exception("Bad message level: %s" % options.LEVEL)
Beispiel #3
0
 def _get_logger(self):
     """Return the logger for this step/daemon, creating it lazily."""
     if self.__logger:
         return self.__logger
     name = "mfdata.%s.%s" % (self.plugin_name,
                              self._get_step_or_daemon_name())
     self.__logger = mflog.getLogger(name)
     return self.__logger
Beispiel #4
0
 def _get_logger(self):
     """Return the logger for this plugin step, creating it lazily."""
     if not self.__logger:
         step = "notset" if self.step_name is None else self.step_name
         self.__logger = mflog.getLogger(
             "mfdata.%s.%s" % (self.plugin_name, step))
     return self.__logger
Beispiel #5
0
# Flag: True once a redis timeout has been detected.
redisTimeoutState = False

# Flag: True once a "clean" stop has been requested.
stopRequest = False

# Date of the last event issued independently from the Notifier instance
# (presumably a timestamp; initialized to 0 meaning "no event yet").
dtLastEvent = 0

# Application inactivity timeout: maximum duration without receiving any
# event before the application stops itself.
inactivityTimeout = DEFAULT_INACTIVITY_TIMEOUT

# Module-level logger.
logger = getLogger("directory_observer")


class Monitor(pyinotify.ProcessEvent):
    redis = None
    server = None
    port = None
    wm = None

    ##
    # Initialization of the instance
    #
    # Classes derived from ProcessEvent must not overload __init __ ()
    # But implement my_init () which will be automatically called
    # by __init __ () of the base class.
    # Attention my_init only supports named parameters.
Beispiel #6
0
#!/usr/bin/env python3

import os
import psutil
import sys
import argparse
import time
import json

from mflog import getLogger

LOG = getLogger('list_metwork_processes')
# Runtime configuration from the environment (None when a var is missing).
USER = os.environ.get('MODULE_RUNTIME_USER', None)
MODULE = os.environ.get('MODULE', None)
MODULE_RUNTIME_HOME = os.environ.get('MODULE_RUNTIME_HOME', None)
# Derived tmp directory; stays None when MODULE_RUNTIME_HOME is not set.
MODULE_RUNTIME_HOME_TMP = MODULE_RUNTIME_HOME + "/tmp" if MODULE_RUNTIME_HOME \
    is not None else None
# USER and MODULE are mandatory: abort early when they are not set.
if USER is None:
    LOG.critical("can't read MODULE_RUNTIME_USER env var")
    sys.exit(1)
if MODULE is None:
    LOG.critical("can't read MODULE env var")
    sys.exit(1)
# Handle on this process and the name of the env var that carries the
# current plugin name for this module.
CURRENT_PROCESS = psutil.Process()
CURRENT_PLUGIN_ENV_VAR = "%s_CURRENT_PLUGIN_NAME" % MODULE


def is_same_family(child, proc):
    if child.pid == proc.pid:
        return True
    try:
Beispiel #7
0
from circus.stream import FileStream
import psutil
from mflog import getLogger

log = getLogger("circus_streammultifiles")


class MultiFilesStream(FileStream):

    _filename = None
    _time_format = None
    _numprocesses = None

    # dict pid => (slot number, opened FileStream)
    _slots = None

    def clean_old_pids(self):
        """Close and forget slots whose owning process has died."""
        # snapshot the keys: entries are deleted from the dict while
        # iterating
        for pid in list(self._slots.keys()):
            if not psutil.pid_exists(pid):
                slot_number, file_stream = self._slots[pid]
                file_stream.close()
                # del is a statement, not a function: no parentheses
                del self._slots[pid]

    def pid_to_slot_number_stream(self, pid):
        if pid in self._slots:
            return self._slots[pid]
        self.clean_old_pids()
        used_slot_numbers = [x[0] for x in self._slots.values()]
        for i in range(0, self._numprocesses):
            if i not in used_slot_numbers:
Beispiel #8
0
def get_logger(*args, **kwargs):
    """Return a metwork logger with stderr output disabled.

    NOTE(review): the env var must be set *before* set_logging_config()
    runs, so the statement order here matters.
    """
    # route stderr log output to /dev/null
    os.environ['MFCOM_LOG_STDERR'] = '/dev/null'
    set_logging_config()
    return getLogger(*args, **kwargs)
Beispiel #9
0
#!/usr/bin/env python

import os
import time
import psutil
from datetime import datetime
from telegraf_unixsocket_client import TelegrafUnixSocketClient
from mflog import getLogger

MFMODULE_RUNTIME_HOME = os.environ["MFMODULE_RUNTIME_HOME"]
SOCKET_PATH = os.path.join(MFMODULE_RUNTIME_HOME, "var", "telegraf.socket")
LOGGER = getLogger("telegraf_collector_custom_diskio")

old_read_bytes = None
old_write_bytes = None
old_dt = None
first = True
while True:
    LOGGER.debug("waiting 10s...")
    time.sleep(10)
    client = TelegrafUnixSocketClient(SOCKET_PATH)
    try:
        client.connect()
    except Exception:
        LOGGER.warning("can't connect to %s, wait 10s and try again...",
                       SOCKET_PATH)
        continue
    stats = psutil.disk_io_counters(perdisk=False)
    new_dt = datetime.now()
    new_read_bytes = stats.read_bytes
    new_write_bytes = stats.write_bytes
Beispiel #10
0
 def get_logger(self):
     """Return the cached mflog logger, building it on first call."""
     if self.__logger:
         return self.__logger
     self.__logger = mflog.getLogger(
         "mfdata.%s.%s" % (self.plugin_name, self.daemon_name))
     return self.__logger
Beispiel #11
0
import os
from mflog import getLogger
import json
from aiohttp import web, ClientSession, BasicAuth, ClientTimeout

import aiohttp_github_helpers as h

GITHUB_USER = os.environ['GITHUB_USER']
GITHUB_PASS = os.environ['GITHUB_PASS']
GITHUB_SECRET = os.environ['GITHUB_SECRET'].encode('utf8')
LOGGER = getLogger("github_webhook_pr_labelling")
TIMEOUT = ClientTimeout(total=20)
AUTH = BasicAuth(GITHUB_USER, GITHUB_PASS)
IGNORE_CONTEXT_GLOBS = ["mergify*"]


async def handle(request):
    event = request['github_event']
    if event != 'status':
        LOGGER.info("ignoring %s event" % event)
        return web.Response(text="ignoring %s event" % event)
    body = await request.read()
    decoded_body = json.loads(body.decode('utf8'))
    repo = decoded_body['repository']['name']
    owner = decoded_body['repository']['owner']['login']
    sha = decoded_body['sha']
    async with ClientSession(auth=AUTH, timeout=TIMEOUT) as session:
        topics = await h.github_get_repo_topics(session, owner, repo)
        if ('integration-level-2' not in topics) and \
                ('integration-level-3' not in topics) and \
                ('integration-level-4' not in topics) and \
Beispiel #12
0
#!/usr/bin/env python3

import sys
import os
import argparse
from xattrfile import XattrFile
from mflog import getLogger
from mfutil import get_unique_hexa_identifier
from acquisition.utils import get_plugin_step_directory_path

logger = getLogger("mfdata.reinject_plugin_step")


def main():
    parser = argparse.ArgumentParser("Inject a file into a plugin/step")
    parser.add_argument("filepath", type=str, help="filepath to inject")
    parser.add_argument("--plugin",
                        type=str,
                        help="plugin name (default :guess_file_type)",
                        default="guess_file_type")
    parser.add_argument("--step",
                        type=str,
                        help="step name (default: main)",
                        default="main")
    parser.add_argument("--move",
                        action="store_true",
                        help="move the file instead of copying it "
                        "(default: copy)")
    parser.add_argument("--random-basename",
                        action="store_true",
                        help="use a random basename for copying/moving "
Beispiel #13
0
from circus.plugins import CircusPlugin
from zmq.eventloop import ioloop
from mfutil import add_inotify_watch
from mflog import getLogger
from inotify_simple import INotify, flags

log = getLogger("circus_autorestart")


class CircusAutorestart(CircusPlugin):
    """Circus plugin to automatically restart watchers.

    Watchers with a working_dir containing the string "/plugins/" are
    checked every second. If a change is detected in the configured working
    dir, the watcher is restarted.

    Args:
        name : the name of the plugin as a string.
        periodic: a PeriodicCallback object to call the
            ping() method every second.
        periodic10: a PeriodicCallback object to call the
            fill_watchers() method every 10 seconds.
        watchers: a set with watcher names (with "/plugins/" in configured
            working_dir).
        watchers_working_dir: a dict with watcher name as a key and
            corresponding inotify watcher as a value.
    """

    name = 'autorestart'
    periodic = None
    watchers = None
Beispiel #14
0
from mflog import getLogger
from mfutil import BashWrapper

LOGGER = getLogger("circus_hooks")


def _call(cmd):
    """Run *cmd* through BashWrapper.

    Returns True on success, False (after logging a warning) when the
    command exits with a non-zero return code.
    """
    # lazy %-style logging args: the message is only built if emitted
    LOGGER.info("Calling %s...", cmd)
    r = BashWrapper(cmd)
    if r.code != 0:
        LOGGER.warning("Bad return code: %i from cmd: %s with output: %s",
                       r.code, cmd, str(r).replace("\n", " "))
        return False
    return True


def _conditional_call(prefix, watcher_name, params=None):
    """Run the hook script "<prefix>_<watcher_name>" if it is on the PATH.

    Returns True when the script is absent (nothing to do) or succeeds,
    False otherwise. Optional *params* are appended to the command line.
    """
    if watcher_name is None:
        cmd = prefix
    else:
        cmd = "%s_%s" % (prefix, watcher_name)
    if BashWrapper("which %s" % cmd).code != 0:
        # hook script not installed: nothing to do, report success
        return True
    if params is not None:
        cmd = "%s %s" % (cmd, " ".join(params))
    return _call(cmd)

Beispiel #15
0
from mflog import getLogger
from mfutil import BashWrapper

LOGGER = getLogger("circus_hooks")


def _call(cmd):
    """Run *cmd* through BashWrapper.

    Returns True on success, False (after logging a warning) when the
    command exits with a non-zero return code.
    """
    # lazy %-style logging args: the message is only built if emitted
    LOGGER.info("Calling %s...", cmd)
    r = BashWrapper(cmd)
    if r.code != 0:
        LOGGER.warning("Bad return code: %i from cmd: %s with output: %s",
                       r.code, cmd, str(r).replace("\n", " "))
        return False
    return True


def _conditional_call(prefix, watcher_name):
    """Run the hook script "<prefix>_<watcher_name>" when present on PATH.

    Returns True when the script is absent (nothing to do) or succeeds,
    False otherwise.
    """
    cmd = "%s_%s" % (prefix, watcher_name)
    if BashWrapper("which %s" % cmd).code != 0:
        return True
    return _call(cmd)


def before_start_shell(watcher, arbiter, hook_name, **kwargs):
    """Circus hook: run the optional before_start_<watcher> shell script."""
    return _conditional_call("before_start", watcher.name)


def after_stop_shell(watcher, arbiter, hook_name, **kwargs):
import os
from mflog import getLogger
import json
from aiohttp import web, ClientSession, BasicAuth, ClientTimeout

import aiohttp_github_helpers as h

GITHUB_USER = os.environ['GITHUB_USER']
GITHUB_PASS = os.environ['GITHUB_PASS']
GITHUB_SECRET = os.environ['GITHUB_SECRET'].encode('utf8')
LOGGER = getLogger("github_webhook_no_pullrequest_on_master")
TIMEOUT = ClientTimeout(total=20)
AUTH = BasicAuth(GITHUB_USER, GITHUB_PASS)

GUIDE_URL = "https://help.github.com/articles/" \
    "changing-the-base-branch-of-a-pull-request/"

COMMENT1 = """
Hi ! I'm the MetworkBot.

Thank you for contributing to this project.

But we don't accept pull requests on `master` branch as said in our
contributing guide.

=> You have to change the base branch of your pull request to
`integration` branch.

It's very easy to do that by following this [github guide](%s).

Many thanks !
Beispiel #17
0
#!/usr/bin/env python

# inspired from
# https://github.com/monitoring-tools/telegraf-plugins/tree/master/netstat

import os
import time
import json
import fnmatch
from telegraf_unixsocket_client import TelegrafUnixSocketClient
from mflog import getLogger
from mfutil import BashWrapper

MFMODULE_RUNTIME_HOME = os.environ["MFMODULE_RUNTIME_HOME"]
SOCKET_PATH = os.path.join(MFMODULE_RUNTIME_HOME, "var", "telegraf.socket")
LOGGER = getLogger("telegraf_collector_metwork_module")
MFMODULE = os.environ['MFMODULE']
CMD = "list_metwork_processes.py --output-format=json --include-current-family"
MONITORING_CMDLINE_PATTERNS = [
    '*telegraf*', '*list_metwork_processes*', '*jsonlog2elasticsearch*'
]
IS_MONITORING_MODULE = (MFMODULE in ['MFSYSMON', 'MFADMIN'])


def is_cmdline_monitoring(cmdline):
    """Return True when *cmdline* belongs to a monitoring process.

    Inside a monitoring module (MFSYSMON/MFADMIN) every process counts as
    monitoring; otherwise the command line is matched against the glob
    patterns in MONITORING_CMDLINE_PATTERNS.
    """
    if IS_MONITORING_MODULE:
        return True
    return any(fnmatch.fnmatch(cmdline, pattern)
               for pattern in MONITORING_CMDLINE_PATTERNS)
#!/usr/bin/env python

# inspired from
# https://github.com/monitoring-tools/telegraf-plugins/tree/master/netstat

import os
import time
import psutil
from datetime import datetime
from telegraf_unixsocket_client import TelegrafUnixSocketClient
from mflog import getLogger
from mfutil import BashWrapper

MODULE_RUNTIME_HOME = os.environ["MODULE_RUNTIME_HOME"]
SOCKET_PATH = os.path.join(MODULE_RUNTIME_HOME, "var", "telegraf.socket")
LOGGER = getLogger("telegraf_collector_custom_netstat")
CMD = "ss -t -a -n"
STATES = {
    "ESTAB": "tcp_established",
    "SYN-SENT": "tcp_syn_sent",
    "SYN-RECV": "tcp_syn_recv",
    "FIN-WAIT-1": "tcp_fin_wait1",
    "FIN-WAIT-2": "tcp_fin_wait2",
    "TIME-WAIT": "tcp_time_wait",
    "UNCONN": "tcp_close",
    "CLOSE-WAIT": "tcp_close_wait",
    "LAST-ACK": "tcp_last_ack",
    "LISTEN": "tcp_listen",
    "CLOSING": "tcp_closing",
    "UNKNOWN": "tcp_none",
    "__TOTAL": "tcp_total"
#!/usr/bin/env python3

import psutil
import sys
import argparse
import json

from mfutil import kill_process_and_children, BashWrapper
from mfutil.cli import MFProgress
from mflog import getLogger

LOG = getLogger('kill_remaining_processes')


def get_processes_to_kill():
    """Return the list of pids reported by list_metwork_processes.py.

    Exits the program (status 1, after a warning) when the helper script
    fails or returns unparseable JSON output.
    """
    x = BashWrapper("list_metwork_processes.py --pids-only "
                    "--output-format=json")
    if not x:
        # lazy %-style logging args instead of eager string building
        LOG.warning("can't execute: %s", x)
        sys.exit(1)
    try:
        pids = json.loads(x.stdout)
    except Exception:
        LOG.warning("bad output: %s", x)
        sys.exit(1)
    return pids


argparser = argparse.ArgumentParser(description="kill remaining non-terminal "
                                    "processes after a module stop")
debug_doc = "debug mode: show processes killed"
Beispiel #20
0
#!/usr/bin/env python3

import os
import time
from mflog import getLogger
from mfutil import BashWrapper, BashWrapperOrRaise
from mfext.conf_monitor import ConfMonitorRunner, md5sumfile

LOGGER = getLogger("conf_monitor")
MFMODULE_RUNTIME_HOME = os.environ['MFMODULE_RUNTIME_HOME']
NGINX_FLAG = (int(os.environ['MFSERV_NGINX_FLAG']) == 1)


def make_new_nginx_conf():
    """Generate a candidate nginx configuration file.

    Returns a (path, md5sum) tuple for the freshly generated file.
    """
    target = os.path.join(MFMODULE_RUNTIME_HOME, "tmp", "tmp_nginx_conf2")
    BashWrapperOrRaise("_make_nginx_conf >%s" % target)
    return (target, md5sumfile(target))


def get_old_nginx_conf():
    """Return (path, md5sum) of the currently deployed nginx config."""
    current = os.path.join(MFMODULE_RUNTIME_HOME, "tmp", "config_auto",
                           "nginx.conf")
    return (current, md5sumfile(current))


def restart_nginx(old_conf, new_conf):
    os.unlink(old_conf)
    os.rename(new_conf, old_conf)
    x = BashWrapper("_nginx.reload")
    if not x:
        LOGGER.warning(x)
Beispiel #21
0
#!/usr/bin/env python3

import os
import psutil
import sys
import argparse

from mflog import getLogger

LOG = getLogger('kill_zombies_nginx_workers')
USER = os.environ.get('MODULE_RUNTIME_USER', None)
if USER is None:
    LOG.critical("can't read MODULE_RUNTIME_USER env var")
    sys.exit(1)
MODULE_RUNTIME_HOME = os.environ.get('MODULE_RUNTIME_HOME', None)
if MODULE_RUNTIME_HOME is None:
    LOG.critical("can't read MODULE_RUNTIME_HOME env var")
    sys.exit(1)


def get_pids():
    pids = []
    for proc in psutil.process_iter():
        try:
            if proc.username() != USER:
                continue
            if proc.ppid() != 1:
                continue
            cmdline = " ".join(proc.cmdline())
            if "nginx: worker process" not in cmdline:
                continue
#!/usr/bin/env python3

import psutil
import sys
import argparse
import json

from mfutil import kill_process_and_children, BashWrapper
from mflog import getLogger

LOG = getLogger('kill_remaining_processes')


def get_processes_to_kill():
    """Return the list of pids reported by list_metwork_processes.py.

    Exits the program (status 1, after a warning) when the helper script
    fails or returns unparseable JSON output.
    """
    x = BashWrapper("list_metwork_processes.py --pids-only "
                    "--output-format=json")
    if not x:
        # lazy %-style logging args instead of eager string building
        LOG.warning("can't execute: %s", x)
        sys.exit(1)
    try:
        pids = json.loads(x.stdout)
    except Exception:
        LOG.warning("bad output: %s", x)
        sys.exit(1)
    return pids


argparser = argparse.ArgumentParser(description="kill remaining non-terminal "
                                    "processes after a module stop")
silent_doc = "silent mode: return only the number of processes killed and " \
    "the number of remaining processes"
Beispiel #23
0
def get_logger(*args, **kwargs):
    """Return a metwork logger with stderr output disabled.

    NOTE(review): the env var must be set *before* set_logging_config()
    runs, so the statement order here matters.
    """
    # route stderr log output to /dev/null
    os.environ['MFCOM_LOG_STDERR'] = '/dev/null'
    set_logging_config()
    return getLogger(*args, **kwargs)
#!/usr/bin/env python3

import os
import time
from telegraf_unixsocket_client import TelegrafUnixSocketClient
from mfutil import BashWrapper
from mflog import getLogger

MODULE_RUNTIME_HOME = os.environ["MODULE_RUNTIME_HOME"]
VAR_IN_PATH = os.path.join(MODULE_RUNTIME_HOME, "var", "in")
SOCKET_PATH = os.path.join(MODULE_RUNTIME_HOME, "var", "telegraf.socket")
LOGGER = getLogger("telegraf_collector_var_in_files_count")


def get_var_in_directories():
    """Return the full paths of the direct subdirectories of var/in."""
    # os.scandir avoids one extra stat() per entry compared to
    # listdir() + isdir(); entry.path equals os.path.join(VAR_IN_PATH, name)
    return [entry.path for entry in os.scandir(VAR_IN_PATH)
            if entry.is_dir()]


def get_file_count(directory):
    """Return the number of files (recursive) under *directory*.

    NOTE(review): when the find/wc pipeline fails (falsy BashWrapper
    result) this falls through and returns None implicitly -- confirm
    callers handle that case.
    """
    cmd = "find %s -type f 2>/dev/null |wc -l" % directory
    x = BashWrapper(cmd)
    if x:
        return int(x.stdout)

Beispiel #25
0
#!/usr/bin/env python3

import os
import psutil
import sys
import argparse
import time
import json

from mflog import getLogger

LOG = getLogger('list_metwork_processes')
USER = os.environ.get('MODULE_RUNTIME_USER', None)
MODULE = os.environ.get('MODULE', None)
MODULE_RUNTIME_HOME = os.environ.get('MODULE_RUNTIME_HOME', None)
MODULE_RUNTIME_HOME_TMP = MODULE_RUNTIME_HOME + "/tmp" if MODULE_RUNTIME_HOME \
    is not None else None
if USER is None:
    LOG.critical("can't read MODULE_RUNTIME_USER env var")
    sys.exit(1)
if MODULE is None:
    LOG.critical("can't read MODULE env var")
    sys.exit(1)
CURRENT_PROCESS = psutil.Process()
CURRENT_PLUGIN_ENV_VAR = "%s_CURRENT_PLUGIN_NAME" % MODULE


def is_same_family(child, proc):
    if child.pid == proc.pid:
        return True
    try: