Code example #1
    def __init__(self):
        self.opts = None
        self.args = None
        self.fuse_server = None
        self.log_cache_dir = None
        self.state_file = None
        self.runfs_thread = None
        self.log = Dummy()
Code example #2
def init_logger(fmt='pretty',
                quiet=False,
                level='INFO',
                fpath=None,
                pre_hooks=None,
                post_hooks=None,
                metric_grouping_interval=None):

    global LOG
    if LOG is not None:
        return LOG

    if quiet and fpath is None:
        # no need for a log - return a dummy
        return Dummy()

    _configure_logger(fmt, quiet, level, fpath, pre_hooks, post_hooks,
                      metric_grouping_interval)

    log = structlog.get_logger()
    level = getattr(logging, level.upper())
    log.setLevel(level)

    if metric_grouping_interval:
        keep_running = Thread(target=dump_metrics,
                              args=(log, metric_grouping_interval))
        keep_running.daemon = True
        keep_running.start()

    # TODO: add functionality to also change the level of the global stdlib logger.

    LOG = log
    return log
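
A minimal usage sketch for the variant above; the module path basescript.log is an assumption, not shown in the snippet:

# usage sketch; the import path `basescript.log` is an assumption
from basescript.log import init_logger

log = init_logger(fmt='pretty', level='DEBUG', fpath='/tmp/app.log')
log.info("started", component="worker")  # structlog-style keyword fields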
Code example #3
File: basescript.py  Project: deep-compute/basescript
    def __init__(self, args=None):
        # argparse parser obj
        self.parser = argparse.ArgumentParser(description=self.DESC)
        self.define_baseargs(self.parser)

        self.subcommands = self.parser.add_subparsers(title="commands")
        self.subcommands.dest = "commands"
        self.subcommands.required = True
        self.define_subcommands(self.subcommands)
        self.subcommand_run = self.subcommands.add_parser("run")
        self.subcommand_run.set_defaults(func=self.run)

        self.define_args(self.subcommand_run)

        self.args = self.parser.parse_args(args=args)

        self.hostname = socket.gethostname()

        if self.args.metric_grouping_interval:
            self.METRIC_GROUPING_INTERVAL = self.args.metric_grouping_interval

        if self.args.debug:
            if self.args.log_level is None:
                self.args.log_level = "debug"
            if self.args.metric_grouping_interval is None:
                self.args.metric_grouping_interval = 0

        if not self.args.log_level:
            self.args.log_level = "info"
            self.args.metric_grouping_interval = self.METRIC_GROUPING_INTERVAL

        if self.args.metric_grouping_interval is None:
            self.args.metric_grouping_interval = self.METRIC_GROUPING_INTERVAL

        log = init_logger(
            fmt=self.args.log_format,
            quiet=self.args.quiet,
            level=self.args.log_level,
            fpath=self.args.log_file,
            processors=self.define_log_processors(),
            metric_grouping_interval=self.args.metric_grouping_interval,
            minimal=self.args.minimal,
        )

        self._flush_metrics_q = log._force_flush_q
        self.log = log.bind(name=self.args.name)

        if self.args.env_file:
            ReadEnv(self.args.env_file)

        self.stats = Dummy()

        args = {n: getattr(self.args, n) for n in vars(self.args)}
        args["func"] = self.args.func.__name__
        self.log.debug("basescript init", **args)
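
For orientation, a minimal subclass sketch of how this constructor is typically driven; the import path and the start() entry point are assumptions inferred from the visible hooks (DESC, define_args, run):

# minimal subclass sketch; `from basescript import BaseScript` and `start()`
# are assumptions based on the constructor above
from basescript import BaseScript

class HelloScript(BaseScript):
    DESC = "prints a greeting"

    def define_args(self, parser):
        # extra arguments for the `run` subcommand
        parser.add_argument("--who", default="world")

    def run(self):
        self.log.info("hello", who=self.args.who)

if __name__ == "__main__":
    HelloScript().start()  # assumed to dispatch to self.args.func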
Code example #4
File: basescript.py  Project: conanmatsui/basescript
    def __init__(self, args=None):
        # argparse parser obj
        self.parser = argparse.ArgumentParser(description=self.DESC)
        self.define_baseargs(self.parser)

        self.subcommands = self.parser.add_subparsers(title='commands')
        self.subcommands.dest = 'commands'
        self.subcommands.required = True
        self.define_subcommands(self.subcommands)
        self.subcommand_run = self.subcommands.add_parser('run')
        self.subcommand_run.set_defaults(func=self.run)

        self.define_args(self.subcommand_run)

        self.args = self.parser.parse_args(args=args)

        self.hostname = socket.gethostname()

        if self.args.metric_grouping_interval:
            self.METRIC_GROUPING_INTERVAL = self.args.metric_grouping_interval

        if self.args.debug:
            if self.args.log_level is None:
                self.args.log_level = 'debug'
            if self.args.metric_grouping_interval is None:
                self.args.metric_grouping_interval = 0

        if not self.args.log_level:
            self.args.log_level = 'info'
            self.args.metric_grouping_interval = self.METRIC_GROUPING_INTERVAL

        if self.args.metric_grouping_interval is None:
            self.args.metric_grouping_interval = self.METRIC_GROUPING_INTERVAL

        self.log = init_logger(
            fmt=self.args.log_format,
            quiet=self.args.quiet,
            level=self.args.log_level,
            fpath=self.args.log_file,
            pre_hooks=self.define_log_pre_format_hooks(),
            post_hooks=self.define_log_post_format_hooks(),
            metric_grouping_interval=self.args.metric_grouping_interval).bind(
                name=self.args.name)

        self.stats = Dummy()

        args = {n: getattr(self.args, n) for n in vars(self.args)}
        args['func'] = self.args.func.__name__
        self.log.debug("basescript init", **args)
Code example #5
File: log.py  Project: ck2135/basescript
def init_logger(
    fmt=None,
    quiet=False,
    level="INFO",
    fpath=None,
    processors=None,
    metric_grouping_interval=None,
    minimal=False,
):
    """
    fmt=pretty/json controls only stderr; file always gets json.
    """

    global LOG
    if LOG is not None:
        return LOG

    if quiet and fpath is None:
        # no need for a log - return a dummy
        return Dummy()

    if not fmt and not quiet:
        fmt = "pretty" if sys.stderr.isatty() else "json"

    _configure_logger(
        fmt, quiet, level, fpath, processors, metric_grouping_interval, minimal
    )

    log = structlog.get_logger()
    log._force_flush_q = queue.Queue(maxsize=FORCE_FLUSH_Q_SIZE)

    if metric_grouping_interval:
        keep_running = Thread(target=dump_metrics, args=(log, metric_grouping_interval))
        keep_running.daemon = True
        keep_running.start()

    # TODO: add functionality to also change the level of the global stdlib logger.

    LOG = log
    return log
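
A behavior sketch of the module-level LOG guard above: once a logger exists, later calls return it unchanged, so their arguments are silently ignored.

log1 = init_logger(level="DEBUG")
log2 = init_logger(level="ERROR")  # short-circuits: LOG is already set
assert log1 is log2                # the second call's arguments are ignored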
Code example #6
# -*- coding: utf-8 -*-

import asyncio
import gzip
import io
import msgpack

from tornado.web import RequestHandler as TornadoRequestHandler
from tornado.web import asynchronous
import tornado.ioloop
from kwikapi import BaseRequest, BaseResponse, BaseRequestHandler
from requests.structures import CaseInsensitiveDict

from deeputil import Dummy

DUMMY_LOG = Dummy()


class TornadoRequest(BaseRequest):
    def __init__(self, req_hdlr):
        super().__init__()
        self._request = req_hdlr.request
        self.response = TornadoResponse(req_hdlr)

    @property
    def url(self):
        return self._request.uri

    @property
    def method(self):
        return self._request.method
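
A minimal sketch of wiring the adapter into a plain Tornado handler; the handler class and its body are hypothetical, not part of the snippet:

# hypothetical handler showing how TornadoRequest wraps a live request
class EchoHandler(TornadoRequestHandler):
    def get(self):
        req = TornadoRequest(self)  # adapter over self.request
        DUMMY_LOG.debug("incoming", method=req.method, url=req.url)
        self.write({"method": req.method, "url": req.url})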
Code example #7
class LogaggFuseRunner:
    def __init__(self):
        self.opts = None
        self.args = None
        self.fuse_server = None
        self.log_cache_dir = None
        self.state_file = None
        self.runfs_thread = None
        self.log = Dummy()

    def _mkdir_logdir(self, parent_directory):
        log_dir = os.path.abspath(os.path.join(parent_directory, "logs"))
        if not os.path.isdir(log_dir):
            self.log.debug('making_cache_directory', d=log_dir)
            os.makedirs(log_dir)
        return log_dir

    def _touch_statefile(self, parent_directory):
        state_file = os.path.abspath(os.path.join(parent_directory, "trackfiles.txt"))
        if not os.path.exists(state_file):
            self.log.debug('making_state_file', f=state_file)
            open(state_file, 'a').close()
        return state_file

    def runfs(self):
        usage = """
    Logagg Log collection FUSE filesystem

    """ + Fuse.fusage
        # argument parsing
        server = LogaggFS(version="%prog " + fuse.__version__,
                          usage=usage,
                          dash_s_do='setsingle',
                          file_class=LogaggFSFile)
        self.fuse_server = server

        p = server.parser
        p.add_option(mountopt='root', metavar='PATH',
                     help='mountpoint')
        p.add_option(mountopt='loglevel', metavar='DEBUG/INFO', default='INFO',
                     help='level of logger')
        p.add_option(mountopt='logfile', metavar='PATH', default='/tmp/fuse.log',
                     help='file path to store logs')

        server.parse(values=server, errex=1)
        self.opts, self.args = server.parser.parse_args()

        # initiating logger
        self.log = DUMMY_LOG
        if self.opts.logfile:
            self.log = init_logger(fpath=self.opts.logfile,
                                   level=self.opts.loglevel)

        ldir = os.path.abspath(server.root)
        ldir = os.path.join(ldir, '')[:-1]
        self.log_cache_dir = ldir

        server.log_cache_dir = self.log_cache_dir
        LogaggFSFile.log_cache_dir = self.log_cache_dir

        server.log = self.log
        MirrorFSFile.log = self.log

        self.log.debug('starting_up')
        # FIXME: report bug of init_logger not working with fpath=None
        try:
            if server.fuse_args.mount_expected():
                os.chdir(server.log_cache_dir)
        except OSError:
            self.log.exception("cannot_enter_root_of_underlying_filesystem")
            sys.exit(1)

        # mkdir logs directory and state file inside log cache directory
        self.log_dir = self._mkdir_logdir(parent_directory=self.log_cache_dir)
        self.state_file = self._touch_statefile(parent_directory=self.log_cache_dir)

        # create tracklist for monitoring log files
        tracklist = TrackList(state_file=self.state_file,
                              directory=self.log_dir,
                              log=self.log)
        LogaggFSFile.tracklist = tracklist
        LogaggFSFile.mountpoint = server.fuse_args.mountpoint

        server.main()

    def start(self):
        th = threading.Thread(target=self.runfs)
        th.daemon = True
        th.start()
        self.runfs_thread = th
        th.join()
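
A launch sketch for the runner above; mount options come from sys.argv via the FUSE option parser, so no arguments are passed here:

runner = LogaggFuseRunner()
runner.start()  # runfs() runs in a daemon thread; start() joins it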
Code example #8
File: fs.py  Project: deep-compute/logagg-collector
class LogaggFuseRunner:
    """
    Initializes and runs LogaggFs file system
    """

    # seconds after which the data inside trackfiles.txt is re-read
    TRACKFILES_REFRESH_INTERVAL = 30

    def __init__(self):
        self.opts = None
        self.args = None
        self.fuse_server = None
        self.log_cache_dir = None
        self.state_file = None
        self.runfs_thread = None
        self.log = Dummy()

    def _mkdir_logdir(self, parent_directory):
        """
        Make logcache/logs dir if not present
        """
        # FIXME: use logagg_utils ensure_dir func

        log_dir = os.path.abspath(os.path.join(parent_directory, "logs"))
        if not os.path.isdir(log_dir):
            self.log.debug("making_cache_directory", d=log_dir)
            os.makedirs(log_dir)
        return log_dir

    def _touch_statefile(self, parent_directory):
        """
        Touch logcache/trackfiles.txt file if it does not exist
        """

        state_file = os.path.abspath(
            os.path.join(parent_directory, "trackfiles.txt"))
        if not os.path.exists(state_file):
            self.log.debug("making_state_file", f=state_file)
            open(state_file, "a").close()
        return state_file

    def runfs(self):
        usage = ("""
        Logagg Log collection FUSE filesystem
        """ + Fuse.fusage)
        # Argument parsing
        server = LogaggFS(
            version="%prog " + fuse.__version__,
            usage=usage,
            dash_s_do="setsingle",
            file_class=LogaggFSFile,
        )
        self.fuse_server = server

        p = server.parser
        p.add_option(mountopt="root", metavar="PATH", help="mountpoint")
        p.add_option(
            mountopt="loglevel",
            metavar="DEBUG/INFO",
            default="INFO",
            help="level of logger",
        )
        p.add_option(
            mountopt="logfile",
            metavar="PATH",
            default="/tmp/fuse.log",
            help="file path to store logs",
        )

        server.parse(values=server, errex=1)
        self.opts, self.args = server.parser.parse_args()

        # initiating logger
        self.log = DUMMY_LOG
        if self.opts.logfile:
            self.log = init_logger(fpath=self.opts.logfile,
                                   level=self.opts.loglevel)

        if not hasattr(server, "root"):
            sys.exit(0)
        ldir = os.path.abspath(server.root)
        ldir = os.path.join(ldir, "")[:-1]
        self.log_cache_dir = ldir

        server.log_cache_dir = self.log_cache_dir
        LogaggFSFile.log_cache_dir = self.log_cache_dir

        server.log = self.log
        MirrorFSFile.log = self.log

        self.log.debug("starting_up")
        # FIXME: report bug of init_logger not working with fpath=None
        try:
            if server.fuse_args.mount_expected():
                os.chdir(server.log_cache_dir)
        except OSError:
            self.log.exception("cannot_enter_root_of_underlying_filesystem")
            sys.exit(1)

        # mkdir logs directory and state file inside log cache directory
        self.log_dir = self._mkdir_logdir(parent_directory=self.log_cache_dir)
        self.state_file = self._touch_statefile(
            parent_directory=self.log_cache_dir)

        # Create tracklist for monitoring log files
        tracklist = TrackList(state_file=self.state_file,
                              directory=self.log_dir,
                              log=self.log)
        LogaggFSFile.tracklist = tracklist

        # LRU cache that expires in TRACKFILES_REFRESH_INTERVAL sec(s)
        clock = ExpiringCache(1,
                              default_timeout=self.TRACKFILES_REFRESH_INTERVAL)
        clock.put("timeout", "no")
        LogaggFSFile.clock = clock

        LogaggFSFile.mountpoint = server.fuse_args.mountpoint

        server.main()

    def start(self):
        th = threading.Thread(target=self.runfs)
        th.daemon = True
        th.start()
        self.runfs_thread = th
        th.join()
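
The clock entry above acts as a cheap timer: once the cached value expires, readers know the refresh interval has elapsed. A hedged sketch of that consuming side, assuming deeputil's ExpiringCache.get returns None for an expired key; refresh_tracklist() is a hypothetical stand-in for re-reading trackfiles.txt:

# hedged sketch; `refresh_tracklist()` is hypothetical, not part of the snippet
if LogaggFSFile.clock.get("timeout") is None:  # expired => interval elapsed
    refresh_tracklist()                        # e.g. re-read trackfiles.txt
    LogaggFSFile.clock.put("timeout", "no")    # re-arm the timer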
Code example #9
import collections
from deeputil import Dummy

from operator import attrgetter

DUMMY = Dummy()


def memoize(f):
    # from: https://goo.gl/aXt4Qy
    class memodict(dict):
        __slots__ = ()

        def __missing__(self, key):
            self[key] = ret = f(key)
            return ret

    return memodict().__getitem__
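
A quick usage sketch of the decorator above; note that memodict keys on a single positional argument, so it only fits one-argument functions:

@memoize
def square(x):
    return x * x

square(4)  # computed and cached in the memodict
square(4)  # served from the cache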


@memoize
def load_object(imp_path):
    """Given a path (python import path), load the object.

    eg of path: logagg.formatters.nginx_access
              : logagg.forwarders.mongodb
    """
    module_name, obj_name = imp_path.split('.', 1)
    module = __import__(module_name)
    obj = attrgetter(obj_name)(module)
    return obj
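
A usage sketch for load_object; the path is illustrative, and the named submodule must already be importable for attrgetter to resolve the attribute chain:

# illustrative path; `logagg.formatters` must be reachable as an attribute chain
nginx_access = load_object('logagg.formatters.nginx_access')
nginx_access2 = load_object('logagg.formatters.nginx_access')  # memoize cache hit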