Example 1
    def incident_declared(self, triggering_event):
        """Begin recording an incident triggered by *triggering_event*.

        Creates ``incident-<time>-<random>.flog`` under ``self.basedir``
        plus a parallel bz2-compressed ``.bz2.tmp`` copy, writes an
        'incident' header embedding the trigger into both, then copies
        every currently-buffered event into both files.  When
        ``TRAILING_DELAY`` is set, recording continues: an observer is
        attached and a reactor timer later calls ``stop_recording``;
        otherwise recording finishes immediately.
        """
        self.trigger = triggering_event
        # choose a name for the logfile
        now = time.time()
        unique = os.urandom(4)
        # 4 random bytes, base32-encoded, keep simultaneous incidents
        # from colliding on the same second-resolution timestamp
        unique_s = base32.encode(unique)
        self.name = "incident-%s-%s" % (self.format_time(now), unique_s)
        filename = self.name + ".flog"
        self.abs_filename = os.path.join(self.basedir, filename)
        # the compressed copy is written to a .tmp name first,
        # presumably renamed to .bz2 once recording finishes — TODO confirm
        self.abs_filename_bz2 = self.abs_filename + ".bz2"
        self.abs_filename_bz2_tmp = self.abs_filename + ".bz2.tmp"
        # open logfile. We use both an uncompressed one and a compressed one.
        self.f1 = open(self.abs_filename, "wb")
        self.f2 = bz2.BZ2File(self.abs_filename_bz2_tmp, "wb")

        # write header with triggering_event
        self.f1.write(flogfile.MAGIC)
        self.f2.write(flogfile.MAGIC)
        flogfile.serialize_header(self.f1,
                                  "incident",
                                  trigger=triggering_event,
                                  versions=app_versions.versions,
                                  pid=os.getpid())
        flogfile.serialize_header(self.f2,
                                  "incident",
                                  trigger=triggering_event,
                                  versions=app_versions.versions,
                                  pid=os.getpid())

        if self.TRAILING_DELAY is not None:
            # subscribe to events that occur after this one
            self.still_recording = True
            self.remaining_events = self.TRAILING_EVENT_LIMIT
            self.logger.addObserver(self.trailing_event)

        # use self.logger.buffers, copy events into logfile
        events = list(self.logger.get_buffered_events())
        # sort by event sequence number ('O' is presumably an alias for
        # the operator module — verify against this file's imports)
        events.sort(key=O.itemgetter('num'))

        for e in events:
            flogfile.serialize_wrapper(self.f1,
                                       e,
                                       from_=self.tubid_s,
                                       rx_time=now)
            flogfile.serialize_wrapper(self.f2,
                                       e,
                                       from_=self.tubid_s,
                                       rx_time=now)

        self.f1.flush()
        # the BZ2File has no flush method

        if self.TRAILING_DELAY is None:
            self.active = False
            eventually(self.finished_recording)
        else:
            # now we wait for the trailing events to arrive
            self.timer = reactor.callLater(self.TRAILING_DELAY,
                                           self.stop_recording)
Example 2
 def _open_savefile(self, now):
     """Open the gatherer's "to-present" savefile and write its header.

     The file is opened in unbuffered binary append mode so each record
     reaches disk immediately.  Records the opening timestamp in
     ``self._starting_timestamp``.
     """
     new_filename = "from-%s---to-present.flog" % self.format_time(now)
     self._savefile_name = os.path.join(self.basedir, new_filename)
     self._savefile = open(self._savefile_name, "ab", 0)
     # Fix: emit the flogfile MAGIC marker before the header, matching
     # every other flogfile writer in this codebase (the incident
     # recorder and the other _open_savefile variant) — without it the
     # file presumably cannot be recognized as a flogfile.
     self._savefile.write(flogfile.MAGIC)
     self._starting_timestamp = now
     flogfile.serialize_header(self._savefile,
                               "gatherer",
                               start=self._starting_timestamp)
Example 3
 def _open_savefile(self, now):
     """Create the "to-present" savefile, write MAGIC plus the
     'gatherer' header, and remember *now* as the starting timestamp."""
     fname = "from-%s---to-present.flog" % self.format_time(now)
     self._savefile_name = os.path.join(self.basedir, fname)
     # buffering=0: unbuffered binary append, records hit disk at once
     savefile = open(self._savefile_name, "ab", 0)
     self._savefile = savefile
     savefile.write(flogfile.MAGIC)
     self._starting_timestamp = now
     flogfile.serialize_header(savefile, "gatherer",
                               start=self._starting_timestamp)
Example 4
 def __init__(self, filename, level=OPERATIONAL):
     """Observer that serializes log events into *filename*.

     A ``.bz2`` suffix selects transparent bz2 compression; any other
     name is written as a plain binary file.  The 'log-file-observer'
     header (versions, pid, threshold) is emitted immediately.
     """
     if filename.endswith(".bz2"):
         import bz2
         logfile = bz2.BZ2File(filename, "w")
     else:
         logfile = open(filename, "wb")
     self._logFile = logfile
     self._level = level
     flogfile.serialize_header(logfile, "log-file-observer",
                               versions=app_versions.versions,
                               pid=os.getpid(),
                               threshold=level)
Example 5
 def __init__(self, filename, level=OPERATIONAL):
     """Open *filename* for event logging and write the observer header.

     Filenames ending in ``.bz2`` are routed through a ``BZ2File``;
     everything else is a plain ``"wb"`` file.  *level* becomes the
     observer's severity threshold, recorded in the header.
     """
     use_bz2 = filename.endswith(".bz2")
     if use_bz2:
         import bz2
         self._logFile = bz2.BZ2File(filename, "w")
     else:
         self._logFile = open(filename, "wb")
     self._level = level
     flogfile.serialize_header(self._logFile, "log-file-observer",
                               versions=app_versions.versions,
                               pid=os.getpid(), threshold=level)
Example 6
    def incident_declared(self, triggering_event):
        """Begin recording an incident triggered by *triggering_event*.

        Creates ``incident-<time>-<random>.flog`` under ``self.basedir``
        plus a parallel bz2-compressed ``.bz2.tmp`` copy, writes an
        'incident' header embedding the trigger into both, then copies
        every currently-buffered event into both files.  When
        ``TRAILING_DELAY`` is set, recording continues: an observer is
        attached and a reactor timer later calls ``stop_recording``;
        otherwise recording finishes immediately.
        """
        self.trigger = triggering_event
        # choose a name for the logfile
        now = time.time()
        # 4 random base32-encoded bytes keep simultaneous incidents from
        # colliding on the same second-resolution timestamp
        unique = os.urandom(4)
        unique_s = base32.encode(unique)
        self.name = "incident-%s-%s" % (self.format_time(now), unique_s)
        filename = self.name + ".flog"
        self.abs_filename = os.path.join(self.basedir, filename)
        self.abs_filename_bz2 = self.abs_filename + ".bz2"
        self.abs_filename_bz2_tmp = self.abs_filename + ".bz2.tmp"
        # open logfile. We use both an uncompressed one and a compressed one.
        self.f1 = open(self.abs_filename, "wb")
        self.f2 = bz2.BZ2File(self.abs_filename_bz2_tmp, "wb")

        # write header with triggering_event
        self.f1.write(flogfile.MAGIC)
        self.f2.write(flogfile.MAGIC)
        flogfile.serialize_header(self.f1, "incident",
                                  trigger=triggering_event,
                                  versions=app_versions.versions,
                                  pid=os.getpid())
        flogfile.serialize_header(self.f2, "incident",
                                  trigger=triggering_event,
                                  versions=app_versions.versions,
                                  pid=os.getpid())

        if self.TRAILING_DELAY is not None:
            # subscribe to events that occur after this one
            self.still_recording = True
            self.remaining_events = self.TRAILING_EVENT_LIMIT
            self.logger.addObserver(self.trailing_event)

        # use self.logger.buffers, copy events into logfile
        events = list(self.logger.get_buffered_events())
        # Fix: the old Python-2-only form
        #   events.sort(lambda a,b: cmp(a['num'], b['num']))
        # breaks on Python 3 (cmp() was removed and list.sort() no longer
        # accepts a positional comparator); sort by the event sequence
        # number with a key function instead.
        events.sort(key=lambda e: e['num'])
        for e in events:
            flogfile.serialize_wrapper(self.f1, e,
                                       from_=self.tubid_s, rx_time=now)
            flogfile.serialize_wrapper(self.f2, e,
                                       from_=self.tubid_s, rx_time=now)

        self.f1.flush()
        # the BZ2File has no flush method

        if self.TRAILING_DELAY is None:
            self.active = False
            eventually(self.finished_recording)
        else:
            # now we wait for the trailing events to arrive
            self.timer = reactor.callLater(self.TRAILING_DELAY,
                                           self.stop_recording)
Example 7
 def __init__(self, filename, level=OPERATIONAL):
     """Observer that logs events to *filename*, writing the flogfile
     MAGIC marker and the 'log-file-observer' header up front.

     A ``.bz2`` suffix routes output through a ``BZ2File``; otherwise a
     plain binary file is used.
     """
     if filename.endswith(".bz2"):
         # py3: bz2file ignores "b", only accepts bytes, not str
         import bz2
         self._logFile = bz2.BZ2File(filename, "w")
     else:
         # todo: line_buffering=True ?
         self._logFile = open(filename, "wb")
     self._level = level
     self._logFile.write(flogfile.MAGIC)
     flogfile.serialize_header(self._logFile, "log-file-observer",
                               versions=app_versions.versions,
                               pid=os.getpid(), threshold=level)
Example 8
 def emit_header(self, versions, pid):
     """Serialize a 'tail'-type flogfile header (versions + pid) to self.f."""
     flogfile.serialize_header(self.f, "tail", versions=versions, pid=pid)
Example 9
 def emit_header(self, versions, pid):
     """Serialize a 'tail'-type flogfile header (versions + pid) to self.f."""
     flogfile.serialize_header(self.f, "tail", versions=versions, pid=pid)