Example no. 1
0
 def configure_with(self, writer):
     """Attach a journal writer and transition to the connected state.

     Valid only while disconnected; _ensure_state() enforces this.
     The writer is adapted to the IJournalWriter interface, twisted
     log events are routed to on_twisted_log from now on, and a flush
     of any cached entries is scheduled.
     """
     self._ensure_state(State.disconnected)
     # Start receiving twisted log events via on_twisted_log.
     twisted_log.addObserver(self.on_twisted_log)
     self._writer = IJournalWriter(writer)
     self._set_state(State.connected)
     # Flush entries that may have been cached while disconnected.
     self._schedule_flush()
Example no. 2
0
class Journaler(log.Logger, common.StateMachineMixin):
    """Buffer journal/log entries and flush them in batches to a writer.

    A small state machine: starts in State.disconnected, becomes
    State.connected once configure_with() attaches an IJournalWriter,
    and returns to disconnected on close().  Entries are cached in
    memory at any time but are written out only while connected.  The
    instance also registers itself as a twisted log observer so that
    twisted-level errors end up in the journal.
    """

    implements(IJournaler, ILogKeeper)

    log_category = 'journaler'

    _error_handler = error_handler

    # FIXME: at some point switch to False and remove this attribute
    should_keep_on_logging_to_flulog = True

    def __init__(self, logger):
        # NOTE(review): the `logger` parameter is accepted but never
        # used in this block; the instance acts as its own log keeper.
        log.Logger.__init__(self, self)

        common.StateMachineMixin.__init__(self, State.disconnected)
        # Journal writer; attached later by configure_with().
        self._writer = None
        # Handle of the scheduled flush, or None when none is pending.
        self._flush_task = None
        # In-memory buffer of entries awaiting a flush.
        self._cache = EntriesCache()
        # Lets insert_entry() callers wait until their data is flushed.
        self._notifier = defer.Notifier()

    def configure_with(self, writer):
        """Attach a writer and transition to the connected state.

        Valid only while disconnected (_ensure_state raises otherwise).
        The writer is adapted to IJournalWriter, twisted log events are
        routed to on_twisted_log from now on, and a flush of any cached
        entries is scheduled.
        """
        self._ensure_state(State.disconnected)
        twisted_log.addObserver(self.on_twisted_log)
        self._writer = IJournalWriter(writer)
        self._set_state(State.connected)
        self._schedule_flush()

    def close(self, flush_writer=True):
        """Detach from twisted logging and close the writer.

        @param flush_writer: passed to the writer's close(); whether it
            should flush pending entries before closing.
        @return: Deferred firing once the writer is closed and the
            journaler is back in the disconnected state.
        """

        def set_disconnected():
            self._writer = None
            self._set_state(State.disconnected)

        try:
            twisted_log.removeObserver(self.on_twisted_log)
        except ValueError:
            # it should be safe to call close() multiple times,
            # in this case we are not registered as the observer anymore
            pass

        d = self._close_writer(flush_writer)
        d.addCallback(defer.drop_param, set_disconnected)
        return d

    ### IJournaler ###

    def get_connection(self, externalizer):
        """Create a JournalerConnection bound to this journaler.

        The argument is adapted to the IExternalizer interface first.
        """
        externalizer = IExternalizer(externalizer)
        instance = JournalerConnection(self, externalizer)
        return instance

    def prepare_record(self):
        """Return a fresh Record bound to this journaler."""
        return Record(self)

    @in_state(State.connected)
    def get_histories(self):
        """Proxy to the writer; only callable while connected."""
        return self._writer.get_histories()

    @in_state(State.connected)
    def get_entries(self, history):
        """Proxy to the writer; only callable while connected."""
        return self._writer.get_entries(history)

    def insert_entry(self, **data):
        """Cache an entry and schedule a flush.

        Works in any state -- the entry just sits in the cache until a
        writer is connected and a flush can run.

        @return: Deferred firing when the next flush completes.
        """
        self._cache.append(data)
        self._schedule_flush()
        return self._notifier.wait('flush')

    @in_state(State.connected)
    def get_filename(self):
        """Proxy to the writer; only callable while connected."""
        return self._writer.get_filename()

    def is_idle(self):
        """Return True when nothing is cached and the writer is idle."""
        if len(self._cache) > 0:
            return False
        if self._writer:
            return self._writer.is_idle()
        return True

    ### ILogObserver provider ###

    def on_twisted_log(self, event_dict):
        """Twisted log observer: journal errors that carry a Failure.

        Only events with an empty 'message' are inspected -- i.e.
        errors reported as a Failure object rather than as text.
        """
        edm = event_dict['message']
        if not edm:
            if event_dict['isError'] and 'failure' in event_dict:
                fail = event_dict['failure']
                self.error("A twisted traceback occurred. Exception: %r.",
                           fail.value)
                # Full traceback only at higher verbosity of the
                # 'twisted' category; otherwise hint how to enable it.
                if flulog.getCategoryLevel("twisted") < flulog.WARN:
                    self.debug(
                        "Run with debug level >= 2 to see the traceback.")
                else:
                    self.error("%s", fail.getTraceback())

    ### ILogKeeper Methods ###

    def do_log(self, level, object, category, format, args,
               depth=-1, file_path=None, line_num=None):
        """Store a log line as a journal entry (entry_type='log').

        Mirrors flulog.doLog()'s signature (hence the `object` name,
        which shadows the builtin).  Lines above the category's
        configured level are dropped.  While
        should_keep_on_logging_to_flulog is True the line is also
        forwarded to flulog.
        """
        level = int(level)
        if category is None:
            category = 'feat'
        # Respect the per-category verbosity threshold.
        if level > flulog.getCategoryLevel(category):
            return

        if file_path is None and line_num is None:
            # Resolve the caller's file/line; the -depth-2 offset
            # presumably skips this frame and the logging wrapper --
            # TODO confirm against flulog.getFileLine.
            file_path, line_num = flulog.getFileLine(where=-depth-2)

        if args:
            message = format % args
        else:
            message = str(format)

        data = dict(
            entry_type='log',
            level=level,
            log_name=object,
            category=category,
            file_path=file_path,
            line_num=line_num,
            message=message,
            timestamp=int(time.time()))
        self.insert_entry(**data)

        if self.should_keep_on_logging_to_flulog:
            flulog.doLog(level, object, category, format, args,
                         where=depth, filePath=file_path, line=line_num)

    ### private ###

    def _schedule_flush(self):
        """Queue a single _flush call if none is already pending.

        NOTE(review): `time` here is the project's time module, not the
        stdlib; call_next presumably schedules the call on the next
        reactor iteration -- confirm.
        """
        if self._flush_task is None:
            self._flush_task = time.call_next(self._flush)

    @in_state(State.connected)
    def _flush(self):
        """Push cached entries to the writer (connected state only).

        On success the cache is committed via _flush_complete(); on
        failure _flush_error() rolls it back so entries are retried.
        """
        entries = self._cache.fetch()
        if entries:
            d = self._writer.insert_entries(entries)
            # drop_param discards the writer's result and invokes
            # _flush_complete (supplied through callbackArgs).
            d.addCallbacks(defer.drop_param, self._flush_error,
                           callbackArgs=(self._flush_complete, ))
            return d
        else:
            self._flush_complete()

    def _flush_complete(self):
        """Commit the flushed entries and wake up waiting callers.

        If new entries arrived while the flush was in progress,
        schedule another flush right away.
        """
        if self._cache.is_locked():
            self._cache.commit()
        self._flush_task = None
        self._notifier.callback('flush', None)
        if len(self._cache) > 0:
            self._schedule_flush()

    def _flush_error(self, fail):
        """Roll back the cache (entries will be retried) and re-raise."""
        self._cache.rollback()
        fail.raiseException()

    def _close_writer(self, flush_writer=True):
        """Close the attached writer, if any.

        @return: Deferred firing when done (fires immediately when no
            writer is configured).
        """
        d = defer.succeed(None)
        if self._writer:
            d.addCallback(defer.drop_param, self._writer.close,
                          flush=flush_writer)
        return d