コード例 #1
0
 def makeDataProducts(self, files, unbanish=False, unignore=False):
     """Makes a list of DPs from a list of (filename, quiet) pairs.

     Symlinks are resolved via Purr.canonizePath, and non-unique source
     paths are dropped. DPs with a default "banish" policy are skipped
     unless 'unbanish' is set; "ignore" policies are upgraded to "copy"
     if 'unignore' is set. Returns the DPs sorted by filename.
     """
     paths = set()
     dps = []
     for filename, quiet in files:
         filename = filename.rstrip('/')
         sourcepath = Purr.canonizePath(filename)
         # skip duplicates (same file reported twice, or via a symlink)
         if sourcepath in paths:
             continue
         paths.add(sourcepath)
         filename = os.path.basename(filename)
         # look up default policy/rename/comment for this basename
         policy, filename, comment = self._default_dp_props.get(
             filename, ("copy", filename, ""))
         dprintf(4, "%s: default policy is %s,%s,%s\n", sourcepath,
                 policy, filename, comment)
         if policy == "banish":
             if not unbanish:
                 continue
             policy = "copy"
         if unignore and policy == "ignore":
             policy = "copy"
         dps.append(
             Purr.DataProduct(filename=filename,
                              sourcepath=sourcepath,
                              policy=policy,
                              comment=comment,
                              quiet=quiet))
     # Python 3: sorted() takes a key function; the old cmp-comparator
     # form (and the cmp builtin) no longer exists
     return sorted(dps, key=lambda dp: dp.filename)
コード例 #2
0
ファイル: Purrer.py プロジェクト: kernsuite-debian/purr
 def makeDataProducts(self, files, unbanish=False, unignore=False):
     """Makes a list of DPs from a list of (filename, quiet) pairs.

     Symlinks are resolved via Purr.canonizePath, and non-unique source
     paths are dropped. DPs with a default "banish" policy are skipped
     unless 'unbanish' is set; "ignore" policies are upgraded to "copy"
     if 'unignore' is set. Returns the DPs sorted by filename.
     """
     paths = set()
     dps = []
     for filename, quiet in files:
         filename = filename.rstrip('/')
         sourcepath = Purr.canonizePath(filename)
         # skip duplicates (same file reported twice, or via a symlink)
         if sourcepath in paths:
             continue
         paths.add(sourcepath)
         filename = os.path.basename(filename)
         # look up default policy/rename/comment for this basename
         policy, filename, comment = self._default_dp_props.get(filename, ("copy", filename, ""))
         dprintf(4, "%s: default policy is %s,%s,%s\n", sourcepath, policy, filename, comment)
         if policy == "banish":
             if not unbanish:
                 continue
             policy = "copy"
         if unignore and policy == "ignore":
             policy = "copy"
         dps.append(Purr.DataProduct(filename=filename, sourcepath=sourcepath,
                                     policy=policy, comment=comment, quiet=quiet))
     # Python 3: sorted() takes a key function; the old cmp-comparator
     # form (and the cmp builtin) no longer exists
     return sorted(dps, key=lambda dp: dp.filename)
コード例 #3
0
ファイル: Purrer.py プロジェクト: kernsuite-debian/purr
 def rescan(self):
     """Checks files and directories on watchlist for updates, rescans them for new data products.
     If any are found, returns them (via makeDataProducts). Skips files in directories
     whose watchingState is set to Purr.UNWATCHED.
     """
     if not self.attached:
         return
     dprint(5, "starting rescan")
     # accumulates names of new or changed files: path -> 'quiet' flag
     newstuff = {}
     # store timestamp of scan
     self.last_scan_timestamp = time.time()
     # go through watched files/directories, check for mtime changes
     for path, watcher in list(self.watchers.items()):
         # get list of new files from watcher
         newfiles = watcher.newFiles()
         # None indicates access error, so drop it from watcher set
         # (unless the watcher is flagged to survive deletion)
         if newfiles is None:
             if watcher.survive_deletion:
                 dprintf(5, "access error on %s, but will still be watched\n", watcher.path)
             else:
                 dprintf(2, "access error on %s, will no longer be watched\n", watcher.path)
                 del self.watchers[path]
             if not watcher.disappeared:
                 self.emit(SIGNAL("disappearedFile"), path)
                 watcher.disappeared = True
             continue
         dprintf(5, "%s: %d new file(s)\n", watcher.path, len(newfiles))
         # if a file has its own watcher, and is independently reported by a directory watcher, skip the directory's
         # version and let the file's watcher report it. Reason for this is that the file watcher may have a more
         # up-to-date timestamp, so we trust it over the dir watcher.
         newfiles = [p for p in newfiles if p is path or p not in self.watchers]
         # skip files in unwatched directories. NB: supply a default for
         # missing keys -- under Python 3, None > int raises TypeError
         # (under Python 2 a missing key silently compared as unwatched)
         newfiles = [filename for filename in newfiles if
                     self._watching_state.get(os.path.dirname(filename), Purr.UNWATCHED) > Purr.UNWATCHED]
         # Now go through files and add them to the newstuff dict
         for newfile in newfiles:
             # if quiet flag is explicitly set on watcher, enforce it
             # if not pouncing on directory, also add quietly
             # (default for missing keys again avoids None < int in Python 3)
             if watcher.quiet or self._watching_state.get(os.path.dirname(newfile), Purr.UNWATCHED) < Purr.POUNCE:
                 quiet = True
             # else add quietly if file is not in the quiet patterns
             else:
                 quiet = matches_patterns(os.path.basename(newfile), self._quiet_patterns)
             # add file to list of new products. Since a file may be reported by multiple
             # watchers, make the quiet flag a logical AND of all the quiet flags (i.e. DP will be
             # marked as quiet only if all watchers report it as quiet).
             newstuff[newfile] = quiet and newstuff.get(newfile, True)
             dprintf(4, "%s: new data product, quiet=%d (watcher quiet: %s)\n", newfile, quiet, watcher.quiet)
             # add a watcher for this file to the temp_watchers list. this is used below
             # to detect renamed and deleted files
             self.temp_watchers[newfile] = Purrer.WatchedFile(newfile)
     # now, go through temp_watchers to see if any newly pounced-on files have disappeared
     for path, watcher in list(self.temp_watchers.items()):
         if watcher.newFiles() is None:
             dprintf(2, "access error on %s, marking as disappeared", watcher.path)
             del self.temp_watchers[path]
             self.emit(SIGNAL("disappearedFile"), path)
     # if we have new data products, send them to the main window
     return self.makeDataProducts(iter(newstuff.items()))
コード例 #4
0
ファイル: Purrer.py プロジェクト: kernsuite-debian/purr
 def __init__(self, path, canary_patterns=None, **kw):
     """Initializes a watched subdirectory.

     'canary_patterns' is a list of filename patterns; files in the
     directory matching them are watched as "canaries" whose updates mark
     the whole subdirectory as updated. Other arguments as per WatchedDir.
     """
     Purrer.WatchedDir.__init__(self, path, **kw)
     # avoid the shared-mutable-default-argument pitfall
     self.canary_patterns = canary_patterns = canary_patterns or []
     self.canaries = {}
     # if no read errors (fileset is None on error), make up list of
     # canaries from canary patterns
     if self.fileset is not None:
         for fname in self.fileset:
             if matches_patterns(fname, canary_patterns):
                 fullname = os.path.join(self.path, fname)
                 self.canaries[fullname] = Purrer.WatchedFile(fullname, mtime=self.mtime)
                 dprintf(3, "watching canary file %s, timestamp %s\n",
                         fullname, time.strftime("%x %X", time.localtime(self.mtime)))
コード例 #5
0
 def setWatchingState(self, path, watching, save_config=True):
     """Sets the watching state of a directory and updates dirconfig.

     Purr.REMOVED removes the directory's config section entirely; any
     other state is stored in its "watching" option. If save_config is
     True, the config file is rewritten on disk.
     """
     dprintf(2, "%s: watching state is %d\n", path, watching)
     self._watching_state[path] = watching
     # config sections are keyed on ~-collapsed paths
     path = Kittens.utils.collapseuser(path)
     if watching == Purr.REMOVED:
         if self.dirconfig.has_section(path):
             self.dirconfig.remove_section(path)
     else:
         if not self.dirconfig.has_section(path):
             self.dirconfig.add_section(path)
         self.dirconfig.set(path, "watching", str(watching))
     if save_config:
         # Python 3: the file() builtin is gone -- use open(), and close
         # the handle deterministically via a context manager
         with open(self.dirconfigfile, 'wt') as f:
             self.dirconfig.write(f)
コード例 #6
0
ファイル: Purrer.py プロジェクト: kernsuite-debian/purr
 def setWatchingState(self, path, watching, save_config=True):
     """Sets the watching state of a directory and updates dirconfig.

     Purr.REMOVED removes the directory's config section entirely; any
     other state is stored in its "watching" option. If save_config is
     True, the config file is rewritten on disk.
     """
     dprintf(2, "%s: watching state is %d\n", path, watching)
     self._watching_state[path] = watching
     # config sections are keyed on ~-collapsed paths
     path = Kittens.utils.collapseuser(path)
     if watching == Purr.REMOVED:
         if self.dirconfig.has_section(path):
             self.dirconfig.remove_section(path)
     else:
         if not self.dirconfig.has_section(path):
             self.dirconfig.add_section(path)
         self.dirconfig.set(path, "watching", str(watching))
     if save_config:
         # Python 3: the file() builtin is gone -- use open(), and close
         # the handle deterministically via a context manager
         with open(self.dirconfigfile, 'wt') as f:
             self.dirconfig.write(f)
コード例 #7
0
 def __init__(self, path, canary_patterns=None, **kw):
     """Initializes a watched subdirectory.

     'canary_patterns' is a list of filename patterns; files in the
     directory matching them are watched as "canaries" whose updates mark
     the whole subdirectory as updated. Other arguments as per WatchedDir.
     """
     Purrer.WatchedDir.__init__(self, path, **kw)
     # avoid the shared-mutable-default-argument pitfall
     self.canary_patterns = canary_patterns = canary_patterns or []
     self.canaries = {}
     # if no read errors (fileset is None on error), make up list of
     # canaries from canary patterns
     if self.fileset is not None:
         for fname in self.fileset:
             if matches_patterns(fname, canary_patterns):
                 fullname = os.path.join(self.path, fname)
                 self.canaries[fullname] = Purrer.WatchedFile(
                     fullname, mtime=self.mtime)
                 dprintf(
                     3, "watching canary file %s, timestamp %s\n",
                     fullname,
                     time.strftime("%x %X", time.localtime(self.mtime)))
コード例 #8
0
 def __init__(self, path, quiet=None, mtime=None, survive_deletion=False):
     """Creates watched file at 'path'. The 'quiet' flag is simply stored.
     If 'mtime' is not None, this will be the file's last-changed timestamp.
     If 'mtime' is None, it will use os.path.getmtime().
     The survive_deletion flag is used to mark watchers that should stay active even if the underlying file
     disappears. Watchers for old data products are created with this flag.
     """
     QObject.__init__(self)
     self.path = path
     self.enabled = True
     self.quiet = quiet
     # resolve the effective timestamp once -- the original evaluated
     # "mtime or self.getmtime()" twice, stat-ing the file a second time
     self.mtime = mtime or self.getmtime()
     dprintf(3, "creating WatchedFile %s, mtime %s (%f)\n", self.path,
             time.strftime("%x %X", time.localtime(self.mtime)),
             mtime or 0)
     self.survive_deletion = survive_deletion
     self.disappeared = False
コード例 #9
0
ファイル: Purrer.py プロジェクト: kernsuite-debian/purr
 def isUpdated(self):
     """Checks if file was updated (i.e. mtime changed) since last check. Returns True if so.
     Returns None on access error."""
     if not self.enabled:
         return None
     mtime = self.getmtime()
     if mtime is None:
         return None
     # clear disappeared flag
     self.disappeared = False
     # compare mtimes -- add .1 sec margin since float numbers may get
     # clobbered during conversion
     updated = (mtime or 0) > (self.mtime or 0) + .1
     if updated:
         # self.mtime may still be None here; substitute 0 so the %f
         # format spec (and localtime) don't raise a TypeError
         old_mtime = self.mtime or 0
         dprintf(4, "WatchedFile %s is updated: mtime %f %s, old mtime %f %s\n", self.path,
                 mtime, time.strftime("%x %X", time.localtime(mtime)),
                 old_mtime, time.strftime("%x %X", time.localtime(old_mtime)))
     self.mtime = mtime
     return updated
コード例 #10
0
def loadPlugins(paths):
    """Imports all candidate plugin modules (*.py except __init__.py) found
    under the given directories.

    Modules that fail to be found or imported are recorded in the global
    'bad_plugins' dict (module name -> error). Renderers register themselves
    as a side effect of import.
    """
    global bad_plugins
    bad_plugins = {}
    # find potential plugin files ('fname', not 'file', to avoid shadowing a builtin)
    modfiles = set()
    for path in paths:
        fnames = set(fnmatch.filter(os.listdir(path), "*.py"))
        fnames.discard('__init__.py')
        modfiles.update(os.path.join(path, fname) for fname in fnames)
    dprintf(1, "%d plugins found\n", len(modfiles))
    # try to import them
    for fullpath in modfiles:
        modfile = os.path.basename(fullpath)
        modname, ext = os.path.splitext(modfile)
        # try to find the module via the imp mechanism
        try:
            fp, pathname, desc = imp.find_module(modname, paths)
        except Exception as err:
            # narrow except Exception (not bare except) so e.g. KeyboardInterrupt propagates
            dprintf(1, "Error finding module for plugin %s: %s\n", fullpath,
                    err)
            bad_plugins[modname] = err
            continue
        # try to import the module
        try:
            try:
                imp.acquire_lock()
                module = imp.load_module('Purr.Plugins.%s' % modname, fp,
                                         pathname, desc)
            finally:
                # always release the import lock and close the file handle
                imp.release_lock()
                if fp:
                    fp.close()
        except Exception as err:
            dprintf(0, "Error importing module %s: %s\n", pathname, err)
            traceback.print_exc()
            bad_plugins[modname] = err
            continue
        # ok, we have a module -- renderers register themselves on import
        dprintf(1, "Imported plugin module '%s' from %s\n", modname, pathname)
    dprintf(1, "%d renderers now available\n", Purr.Render.numRenderers())
コード例 #11
0
ファイル: __init__.py プロジェクト: kernsuite-debian/purr
def loadPlugins(paths):
    """Imports all candidate plugin modules (*.py except __init__.py) found
    under the given directories.

    Modules that fail to be found or imported are recorded in the global
    'bad_plugins' dict (module name -> error). Renderers register themselves
    as a side effect of import.
    """
    global bad_plugins
    bad_plugins = {}
    # find potential plugin files ('fname', not 'file', to avoid shadowing a builtin)
    modfiles = set()
    for path in paths:
        fnames = set(fnmatch.filter(os.listdir(path), "*.py"))
        fnames.discard('__init__.py')
        modfiles.update(os.path.join(path, fname) for fname in fnames)
    dprintf(1, "%d plugins found\n", len(modfiles))
    # try to import them
    for fullpath in modfiles:
        modfile = os.path.basename(fullpath)
        modname, ext = os.path.splitext(modfile)
        # try to find the module via the imp mechanism
        try:
            fp, pathname, desc = imp.find_module(modname, paths)
        except Exception as err:
            # narrow except Exception (not bare except) so e.g. KeyboardInterrupt propagates
            dprintf(1, "Error finding module for plugin %s: %s\n", fullpath, err)
            bad_plugins[modname] = err
            continue
        # try to import the module
        try:
            try:
                imp.acquire_lock()
                module = imp.load_module('Purr.Plugins.%s' % modname, fp, pathname, desc)
            finally:
                # always release the import lock and close the file handle
                imp.release_lock()
                if fp:
                    fp.close()
        except Exception as err:
            dprintf(0, "Error importing module %s: %s\n", pathname, err)
            traceback.print_exc()
            bad_plugins[modname] = err
            continue
        # ok, we have a module -- renderers register themselves on import
        dprintf(1, "Imported plugin module '%s' from %s\n", modname, pathname)
    dprintf(1, "%d renderers now available\n", Purr.Render.numRenderers())
コード例 #12
0
ファイル: Purrer.py プロジェクト: kernsuite-debian/purr
 def updatePoliciesFromEntry(self, entry, new=True):
     """Updates default DP policies and renames from a log entry's DPs.

     Ignored DPs are appended to the ignore-list file (only when 'new' is
     set and an ignorelistfile is configured). Non-ignored DPs get a new
     WatchedFile watcher, or an existing watcher's timestamp bumped.
     """
     for dp in entry.dps:
         # add default policy
         basename = os.path.basename(dp.sourcepath)
         self._default_dp_props[basename] = dp.policy, dp.filename, dp.comment
         dprintf(4, "file %s: default policy is %s\n", basename, dp.policy)
         # make new watchers for non-ignored files
         if dp.ignored:
             # if ignorelistfile is not set, then we're being called from within
             # _attach(), when older log entries are being loaded. No need to write the file then.
             if new and self.ignorelistfile and os.path.exists(dp.sourcepath):
                 try:
                     # use a context manager so the handle is closed promptly
                     # (the original open(...).write(...) leaked it)
                     with open(self.ignorelistfile, 'a') as f:
                         f.write("%d %s %s\n" % (os.path.getmtime(dp.sourcepath), dp.policy, dp.sourcepath))
                 except Exception:
                     print("Error writing %s" % self.ignorelistfile)
                     traceback.print_exc()
         else:
             watcher = self.watchers.get(dp.sourcepath, None)
             # if watcher already exists, update timestamp
             if watcher:
                 if watcher.mtime < dp.timestamp:
                     watcher.mtime = dp.timestamp
                     dprintf(4, "file %s, updating timestamp to %s\n",
                             dp.sourcepath, time.strftime("%x %X", time.localtime(dp.timestamp)))
             # else create new watcher; survive_deletion keeps it alive
             # even if the underlying file disappears
             else:
                 wfile = Purrer.WatchedFile(dp.sourcepath, quiet=dp.quiet, mtime=dp.timestamp, survive_deletion=True)
                 self.watchers[dp.sourcepath] = wfile
                 dprintf(4, "watching file %s, timestamp %s\n",
                         dp.sourcepath, time.strftime("%x %X", time.localtime(dp.timestamp)))
コード例 #13
0
 def isUpdated(self):
     """Checks if file was updated (i.e. mtime changed) since last check. Returns True if so.
     Returns None on access error."""
     if not self.enabled:
         return None
     mtime = self.getmtime()
     if mtime is None:
         return None
     # clear disappeared flag
     self.disappeared = False
     # compare mtimes -- add .1 sec margin since float numbers may get
     # clobbered during conversion
     updated = (mtime or 0) > (self.mtime or 0) + .1
     if updated:
         # self.mtime may still be None here; substitute 0 so the %f
         # format spec (and localtime) don't raise a TypeError
         old_mtime = self.mtime or 0
         dprintf(
             4,
             "WatchedFile %s is updated: mtime %f %s, old mtime %f %s\n",
             self.path, mtime,
             time.strftime("%x %X", time.localtime(mtime)), old_mtime,
             time.strftime("%x %X", time.localtime(old_mtime)))
     self.mtime = mtime
     return updated
コード例 #14
0
 def newFiles(self):
     """Returns new files (since last call to newFiles, or since creation).
     The only possible new file is the subdirectory itself, which is considered
     new if updated, or if a canary has changed.
     Return value is an iterable, or None on access error."""
     # fileset is None if the directory could not be read
     if self.fileset is None:
         return None
     # check for new files first
     newfiles = Purrer.WatchedDir.newFiles(self)
     if newfiles is None:
         return None
     # this timestamp is assigned to all canaries when directory has changed
     timestamp = time.time()
     # check for new canaries among new files
     if newfiles:
         dprintf(3, "directory %s is updated\n", self.path)
         for fname in newfiles:
             if matches_patterns(os.path.basename(fname),
                                 self.canary_patterns):
                 self.canaries[fname] = Purrer.WatchedFile(
                     fname, mtime=timestamp)
                 dprintf(
                     3, "watching new canary file %s, timestamp %s\n",
                     fname,
                     time.strftime("%x %X", time.localtime(timestamp)))
     # else check current canaries for updates
     else:
         # iterate a snapshot: entries may be deleted during the loop
         for filename, watcher in list(self.canaries.items()):
             updated = watcher.isUpdated()
             if updated is None:
                 dprintf(
                     2,
                     "access error on canary %s, will no longer be watched",
                     filename)
                 del self.canaries[filename]
                 continue
             elif updated:
                 dprintf(3, "canary %s is updated\n", filename)
                 newfiles = True  # treated as a bool below
                 break
         # now, if directory has updated, reset timestamps on all canaries
         if newfiles:
             for watcher in self.canaries.values():
                 watcher.mtime = timestamp
     # return ourselves (as the single "new file") if anything has updated
     return [self.path] if newfiles else []
コード例 #15
0
 def __init__(self,
              path,
              quiet=None,
              mtime=None,
              survive_deletion=False):
     """Creates watched file at 'path'. The 'quiet' flag is simply stored.
     If 'mtime' is not None, this will be the file's last-changed timestamp.
     If 'mtime' is None, it will use os.path.getmtime().
     The survive_deletion flag is used to mark watchers that should stay active even if the underlying file
     disappears. Watchers for old data products are created with this flag.
     """
     QObject.__init__(self)
     self.path = path
     self.enabled = True
     self.quiet = quiet
     # resolve the effective timestamp once -- the original evaluated
     # "mtime or self.getmtime()" twice, stat-ing the file a second time
     self.mtime = mtime or self.getmtime()
     dprintf(3, "creating WatchedFile %s, mtime %s (%f)\n", self.path,
             time.strftime("%x %X", time.localtime(self.mtime)),
             mtime or 0)
     self.survive_deletion = survive_deletion
     self.disappeared = False
コード例 #16
0
ファイル: Purrer.py プロジェクト: kernsuite-debian/purr
 def addWatchedDirectory(self, dirname, watching=Purr.WATCHED, save_config=True):
     """Starts watching the specified directory for changes.

     If the directory is already watched, only its watching state is
     updated (watching=None leaves the state unchanged). Otherwise a
     WatchedDir is created, plus WatchedFile/WatchedSubdir watchers for
     contents matching the configured patterns.
     """
     # resolve symlinks etc. so the watch key is canonical
     dirname = Purr.canonizePath(dirname)
     # do nothing if already watching
     if dirname in self.watched_dirs:
         # use dprintf (printf-style), since a format argument is passed
         dprintf(1, "addWatchDirectory(): already watching %s\n", dirname)
         # watching=None means do not change the watch-state
         if watching is None:
             return
     else:
         if watching is None:
             watching = Purr.WATCHED
         # make watcher object
         wdir = Purrer.WatchedDir(dirname, mtime=self.timestamp,
                                  watch_patterns=self._watch_patterns, ignore_patterns=self._ignore_patterns)
         # fileset=None indicates error reading directory, so ignore it
         if wdir.fileset is None:
             print("There was an error reading the directory %s, will stop watching it." % dirname)
             self.setWatchingState(dirname, Purr.REMOVED, save_config=True)
             return
         self.watchers[dirname] = wdir
         self.watched_dirs.append(dirname)
         dprintf(2, "watching directory %s, mtime %s, %d files\n",
                 dirname, time.strftime("%x %X", time.localtime(wdir.mtime)), len(wdir.fileset))
         # find files in this directory matching the watch_patterns, and watch them for changes
         watchset = set()
         for patt in self._watch_patterns:
             watchset.update(fnmatch.filter(wdir.fileset, patt))
         for fname in watchset:
             quiet = matches_patterns(fname, self._quiet_patterns)
             fullname = Purr.canonizePath(os.path.join(dirname, fname))
             if fullname not in self.watchers:
                 wfile = Purrer.WatchedFile(fullname, quiet=quiet, mtime=self.timestamp)
                 self.watchers[fullname] = wfile
                 dprintf(3, "watching file %s, timestamp %s, quiet %d\n",
                         fullname, time.strftime("%x %X", time.localtime(wfile.mtime)), quiet)
         # find subdirectories matching the subdir_patterns, and watch them for changes.
         # NB: a distinct name is used for the subdir watcher; the original
         # rebound 'wdir' here, shadowing the directory watcher above
         for fname in wdir.fileset:
             fullname = Purr.canonizePath(os.path.join(dirname, fname))
             if os.path.isdir(fullname):
                 for desc, dir_patts, canary_patts in self._subdir_patterns:
                     if matches_patterns(fname, dir_patts):
                         quiet = matches_patterns(fname, self._quiet_patterns)
                         wsubdir = Purrer.WatchedSubdir(fullname, canary_patterns=canary_patts, quiet=quiet,
                                                        mtime=self.timestamp)
                         self.watchers[fullname] = wsubdir
                         dprintf(3, "watching subdirectory %s/{%s}, timestamp %s, quiet %d\n",
                                 fullname, ",".join(canary_patts),
                                 time.strftime("%x %X", time.localtime(wsubdir.mtime)), quiet)
                         break
     # set state and save config
     self.setWatchingState(dirname, watching, save_config=save_config)
コード例 #17
0
ファイル: Purrer.py プロジェクト: kernsuite-debian/purr
 def newFiles(self):
     """Returns new files (since last call to newFiles, or since creation).
     The only possible new file is the subdirectory itself, which is considered
     new if updated, or if a canary has changed.
     Return value is an iterable, or None on access error."""
     # fileset is None if the directory could not be read
     if self.fileset is None:
         return None
     # check for new files first
     newfiles = Purrer.WatchedDir.newFiles(self)
     if newfiles is None:
         return None
     # this timestamp is assigned to all canaries when directory has changed
     timestamp = time.time()
     # check for new canaries among new files
     if newfiles:
         dprintf(3, "directory %s is updated\n", self.path)
         for fname in newfiles:
             if matches_patterns(os.path.basename(fname), self.canary_patterns):
                 self.canaries[fname] = Purrer.WatchedFile(fname, mtime=timestamp)
                 dprintf(3, "watching new canary file %s, timestamp %s\n",
                         fname, time.strftime("%x %X", time.localtime(timestamp)))
     # else check current canaries for updates
     else:
         # iterate a snapshot: entries may be deleted during the loop
         for filename, watcher in list(self.canaries.items()):
             updated = watcher.isUpdated()
             if updated is None:
                 dprintf(2, "access error on canary %s, will no longer be watched", filename)
                 del self.canaries[filename]
                 continue
             elif updated:
                 dprintf(3, "canary %s is updated\n", filename)
                 newfiles = True  # treated as a bool below
                 break
         # now, if directory has updated, reset timestamps on all canaries
         if newfiles:
             for watcher in self.canaries.values():
                 watcher.mtime = timestamp
     # return ourselves (as the single "new file") if anything has updated
     return [self.path] if newfiles else []
コード例 #18
0
 def __init__(self, path, watch_patterns=None, ignore_patterns=None, **kw):
     """Initializes directory.
     'ignore_patterns' is a list of patterns to be ignored.
     'watch_patterns' is a list of patterns to be watched.
     New files will be reported only if they don't match any of the ignore patterns, or
     match a watch pattern.
     All other arguments as per WatchedFile
     """
     Purrer.WatchedFile.__init__(self, path, **kw)
     # avoid the shared-mutable-default-argument pitfall
     self.watch_patterns = watch_patterns = watch_patterns or []
     self.ignore_patterns = ignore_patterns = ignore_patterns or []
     self._newfiles = []
     # the self.fileset attribute gives the current directory content
     try:
         self.fileset = set(os.listdir(self.path))
     except Exception:
         # narrow except Exception (not bare except); None indicates a read error
         _printexc("Error doing listdir(%s)" % self.path)
         traceback.print_exc()
         self.fileset = None
         return
     # check for files created after the supplied timestamp
     if self.getmtime() > self.mtime:
         dprintf(2, "%s modified since last run (%f vs %f), checking for new files\n",
                 self.path, self.getmtime(), self.mtime)
         for fname in self.fileset:
             # skip files matching the ignore list, unless they also match the watch list
             if matches_patterns(fname, ignore_patterns) and not matches_patterns(fname, watch_patterns):
                 dprintf(5, "%s: matches ignore list but not watch list, skipping\n", fname)
                 continue
             fullname = os.path.join(self.path, fname)
             # check creation time against our timestamp
             try:
                 ctime = os.path.getctime(fullname)
             except Exception:
                 _printexc("Error getting ctime for %s, ignoring", fname)
                 continue
             if ctime > self.mtime:
                 dprintf(4, "%s: new file (created %s)\n", fullname,
                         time.strftime("%x %X", time.localtime(ctime)))
                 # append basename to _newfiles: full path added in newFiles() below
                 self._newfiles.append(fname)
コード例 #19
0
 def load(self, pathname):
     """Loads entry from directory.

     Returns None (and loads nothing) if 'pathname' does not match the
     entry-directory pattern; raises ValueError if it is not a readable,
     writable directory.
     """
     match = self._entry_re.match(pathname)
     if not match:
         return None
     self.ignore = (match.group(1) == "ignore")
     if not os.path.isdir(pathname):
         raise ValueError("%s: not a directory" % pathname)
     if not os.access(pathname, os.R_OK | os.W_OK):
         raise ValueError("%s: insufficient access privileges" % pathname)
     # parse index.html file; context manager closes it even on a parse error
     parser = Purr.Parsers.LogEntryIndexParser(pathname)
     self.index_file = os.path.join(pathname, 'index.html')
     with open(self.index_file) as indexfile:
         for i, line in enumerate(indexfile):
             try:
                 parser.feed(line)
             except Exception:
                 dprintf(0, "parse error at line %d of %s\n", i,
                         self.index_file)
                 raise
     # set things up from parser
     try:
         self.timestamp = int(float(parser.timestamp))
     except Exception:
         # missing or malformed timestamp: fall back to "now"
         self.timestamp = int(time.time())
     self.title = getattr(parser, 'title', None)
     if self.title is None:
         self.title = "Malformed entry, probably needs to be deleted"
     self.comment = getattr(parser, 'comments', None) or ""
     self.dps = getattr(parser, 'dps', [])
     self.pathname = pathname
     # see if any data products have been removed on us
     self.dps = [dp for dp in self.dps if os.path.exists(dp.fullpath)]
     # see if the cached include file is up-to-date
     self.cached_include = cache = os.path.join(pathname,
                                                'index.include.html')
     # cache mtime, or 0 if the cache file does not exist
     mtime = os.path.getmtime(cache) if os.path.exists(cache) else 0
     if mtime >= max(Purr.Render.youngest_renderer,
                     os.path.getmtime(self.index_file)):
         dprintf(2, "entry %s has a valid include cache\n", pathname)
         self.cached_include_valid = True
     else:
         dprintf(2, "entry %s does not have a valid include cache\n",
                 pathname)
         self.cached_include_valid = False
     # mark entry as updated if any renderer is younger than the index
     self.updated = (Purr.Render.youngest_renderer > os.path.getmtime(
         self.index_file))
コード例 #20
0
ファイル: LogEntry.py プロジェクト: kernsuite-debian/purr
 def load(self, pathname):
     """Loads entry from directory.

     Returns None (and loads nothing) if 'pathname' does not match the
     entry-directory pattern; raises ValueError if it is not a readable,
     writable directory.
     """
     match = self._entry_re.match(pathname)
     if not match:
         return None
     self.ignore = (match.group(1) == "ignore")
     if not os.path.isdir(pathname):
         raise ValueError("%s: not a directory" % pathname)
     if not os.access(pathname, os.R_OK | os.W_OK):
         raise ValueError("%s: insufficient access privileges" % pathname)
     # parse index.html file; context manager closes it even on a parse error
     parser = Purr.Parsers.LogEntryIndexParser(pathname)
     self.index_file = os.path.join(pathname, 'index.html')
     with open(self.index_file) as indexfile:
         for i, line in enumerate(indexfile):
             try:
                 parser.feed(line)
             except Exception:
                 dprintf(0, "parse error at line %d of %s\n", i, self.index_file)
                 raise
     # set things up from parser
     try:
         self.timestamp = int(float(parser.timestamp))
     except Exception:
         # missing or malformed timestamp: fall back to "now"
         self.timestamp = int(time.time())
     self.title = getattr(parser, 'title', None)
     if self.title is None:
         self.title = "Malformed entry, probably needs to be deleted"
     self.comment = getattr(parser, 'comments', None) or ""
     self.dps = getattr(parser, 'dps', [])
     self.pathname = pathname
     # see if any data products have been removed on us
     self.dps = [dp for dp in self.dps if os.path.exists(dp.fullpath)]
     # see if the cached include file is up-to-date
     self.cached_include = cache = os.path.join(pathname, 'index.include.html')
     # cache mtime, or 0 if the cache file does not exist
     mtime = os.path.getmtime(cache) if os.path.exists(cache) else 0
     if mtime >= max(Purr.Render.youngest_renderer, os.path.getmtime(self.index_file)):
         dprintf(2, "entry %s has a valid include cache\n", pathname)
         self.cached_include_valid = True
     else:
         dprintf(2, "entry %s does not have a valid include cache\n", pathname)
         self.cached_include_valid = False
     # mark entry as updated if any renderer is younger than the index
     self.updated = (Purr.Render.youngest_renderer > os.path.getmtime(self.index_file))
コード例 #21
0
 def updatePoliciesFromEntry(self, entry, new=True):
     """Populates default data-product policies/renames from an entry's DP list,
     and creates or refreshes watchers for the entry's non-ignored data products.

     'entry': a log entry whose .dps list is scanned.
     'new':   if True, ignored DPs are also appended to the on-disk ignorelist
              file (when self.ignorelistfile is set), so they are not pounced
              on again after a restart.
     """
     for dp in entry.dps:
         # add default policy, keyed by the source file's basename
         basename = os.path.basename(dp.sourcepath)
         self._default_dp_props[
             basename] = dp.policy, dp.filename, dp.comment
         dprintf(4, "file %s: default policy is %s\n", basename, dp.policy)
         # make new watchers for non-ignored files
         if dp.ignored:
             # if ignorelistfile is not set, then we're being called from within
             # _attach(), when older log entries are being loaded. No need to write the file then.
             if new and self.ignorelistfile and os.path.exists(
                     dp.sourcepath):
                 try:
                     # record "mtime policy path"; context manager ensures the
                     # append handle is closed promptly (was previously leaked)
                     with open(self.ignorelistfile, 'a') as ignorelist:
                         ignorelist.write("%d %s %s\n" % (os.path.getmtime(
                             dp.sourcepath), dp.policy, dp.sourcepath))
                 except Exception:
                     print("Error writing %s" % self.ignorelistfile)
                     traceback.print_exc()
         else:
             watcher = self.watchers.get(dp.sourcepath, None)
             # if watcher already exists, update timestamp
             if watcher:
                 if watcher.mtime < dp.timestamp:
                     watcher.mtime = dp.timestamp
                     dprintf(
                         4, "file %s, updating timestamp to %s\n",
                         dp.sourcepath,
                         time.strftime("%x %X",
                                       time.localtime(dp.timestamp)))
             # else create new watcher
             else:
                 wfile = Purrer.WatchedFile(dp.sourcepath,
                                            quiet=dp.quiet,
                                            mtime=dp.timestamp,
                                            survive_deletion=True)
                 self.watchers[dp.sourcepath] = wfile
                 dprintf(
                     4, "watching file %s, timestamp %s\n", dp.sourcepath,
                     time.strftime("%x %X", time.localtime(dp.timestamp)))
コード例 #22
0
ファイル: Purrer.py プロジェクト: kernsuite-debian/purr
 def __init__(self, path, watch_patterns=(), ignore_patterns=(), **kw):
     """Initializes directory.
     'ignore_patterns' is a list of patterns to be ignored.
     'watch_patterns' is a list of patterns to be watched.
     New files will be reported only if they don't match any of the ignore patterns, or
     match a watch pattern.
     All other arguments as per WatchedFile
     """
     Purrer.WatchedFile.__init__(self, path, **kw)
     # immutable tuple defaults avoid the shared-mutable-default pitfall;
     # callers passing explicit lists are unaffected
     self.watch_patterns = watch_patterns
     self.ignore_patterns = ignore_patterns
     self._newfiles = []
     # the self.fileset attribute gives the current directory content
     try:
         self.fileset = set(os.listdir(self.path))
     except Exception:
         _printexc("Error doing listdir(%s)" % self.path)
         traceback.print_exc()
         self.fileset = None  # this indicates a read error
         return
     # check for files created after the supplied timestamp
     if self.getmtime() > self.mtime:
         dprintf(2, "%s modified since last run (%f vs %f), checking for new files\n", self.path,
                 self.getmtime(), self.mtime)
         for fname in self.fileset:
             # ignore files from ignore list (unless also on the watch list)
             if matches_patterns(fname, ignore_patterns) and not matches_patterns(fname, watch_patterns):
                 dprintf(5, "%s: matches ignore list but not watch list, skipping\n", fname)
                 continue
             fullname = os.path.join(self.path, fname)
             # check creation time against our timestamp
             try:
                 ctime = os.path.getctime(fullname)
             except Exception:
                 _printexc("Error getting ctime for %s, ignoring", fname)
                 continue
             if ctime > self.mtime:
                 dprintf(4, "%s: new file (created %s)\n", fullname,
                         time.strftime("%x %X", time.localtime(ctime)))
                 # append basename to _newfiles: full path added in newFiles() below
                 self._newfiles.append(fname)
コード例 #23
0
ファイル: LogEntry.py プロジェクト: kernsuite-debian/purr
 def save(self, dirname=None, refresh=0, refresh_index=True, emit_message=True):
     """Saves entry in the given directory. Data products will be copied over if not
     residing in that directory.
     'refresh' is a timestamp, passed to renderIndex(), causing all data products OLDER than the specified time to be regenerated.
     'refresh_index', if true, causes index files to be re-rendered unconditionally
     'emit_message' is accepted for interface compatibility; it is not referenced in this body.
     """
     # nothing to do if entry is unchanged and no forced refresh requested
     if not refresh and not self.updated:
         return
     timestr = time.strftime("%Y%m%d-%H%M%S", time.localtime(self.timestamp))
     Purr.progressMessage("Rendering entry for %s" % timestr)
     # work out the entry's output directory: explicit dirname wins, else reuse stored pathname
     if dirname:
         self.pathname = pathname = os.path.join(dirname, "%s-%s" %
                                                 (("ignore" if self.ignore else "entry"), timestr))
     elif not self.pathname:
         raise ValueError("Cannot save entry: pathname not specified")
     else:
         pathname = self.pathname
     # set timestamp
     if not self.timestamp:
         self.timestamp = int(time.time())
     # get canonized path to output directory
     pathname = Purr.canonizePath(pathname)
     if not os.path.exists(pathname):
         os.mkdir(pathname)
     # copy data products as needed
     # (removed unused 'devnum = os.stat(pathname).st_dev' -- value was never read)
     dprintf(2, "saving entry %s, %d data products\n", pathname, len(self.dps))
     dps = []
     for dp in self.dps:
         # if archived, this indicates a previously saved data product, so ignore it
         # if ignored, no need to save the DP -- but keep it in list
         if dp.archived or dp.ignored:
             dprintf(3, "dp %s is archived or ignored, skipping\n", dp.sourcepath)
             dps.append(dp)
             continue
         # file missing for some reason (perhaps it got removed on us?) skip data product entirely
         if not os.path.exists(dp.sourcepath):
             dprintf(2, "data product %s missing, ignoring\n", dp.sourcepath)
             continue
         Purr.progressMessage("archiving %s" % dp.filename, sub=True)
         # get normalized source and destination paths
         dprintf(2, "data product: %s, rename %s, policy %s\n", dp.sourcepath, dp.filename, dp.policy)
         sourcepath = Purr.canonizePath(dp.sourcepath)
         destname = dp.fullpath = os.path.join(pathname, dp.filename)
         dprintf(2, "data product: %s -> %s\n", sourcepath, destname)
         # does the destination product already exist? skip if same file, else remove
         if os.path.exists(destname):
             if os.path.samefile(destname, sourcepath):
                 dprintf(2, "same file, skipping\n")
                 dp.timestamp = os.path.getmtime(destname)
                 dps.append(dp)
                 continue
             # NOTE(review): paths containing single quotes will break these
             # shell commands -- consider shlex.quote/subprocess
             if os.system("/bin/rm -fr '%s'" % destname):
                 print("Error removing %s, which is in the way of %s" % (destname, sourcepath))
                 print("This data product is not saved.")
                 continue
         # for directories, compress with tar
         if os.path.isdir(sourcepath):
             sourcepath = sourcepath.rstrip('/')
             if dp.policy == "copy" or dp.policy.startswith("move"):
                 dprintf(2, "archiving to tgz\n")
                 if os.system("tar zcf '%s' -C '%s' '%s'" % (destname,
                                                             os.path.dirname(sourcepath),
                                                             os.path.basename(sourcepath))):
                     print("Error archiving %s to %s" % (sourcepath, destname))
                     print("This data product is not saved.")
                     continue
                 # a "move" policy deletes the original after archiving
                 if dp.policy.startswith("move"):
                     os.system("/bin/rm -fr '%s'" % sourcepath)
         # else just a file
         else:
             # now copy/move it over
             if dp.policy == "copy":
                 dprintf(2, "copying\n")
                 if _copy_update(sourcepath, destname):
                     print("Error copying %s to %s" % (sourcepath, destname))
                     print("This data product is not saved.")
                     continue
             elif dp.policy.startswith('move'):
                 if _move_update(sourcepath, destname):
                     print("Error moving %s to %s" % (sourcepath, destname))
                     print("This data product is not saved.")
                     continue
         # success, set timestamp and append
         dp.timestamp = os.path.getmtime(destname)
         dp.archived = True
         dps.append(dp)
     # reset list of data products (only the successfully saved/kept ones)
     self.dps = dps
     # now write out content; include cache is invalidated since content changed
     self.cached_include = os.path.join(pathname, 'index.include.html')
     self.cached_include_valid = False
     self.index_file = os.path.join(pathname, "index.html")
     self.generateIndex(refresh=refresh, refresh_index=refresh_index and time.time())
     self.updated = False
コード例 #24
0
 def rescan(self):
     """Checks files and directories on watchlist for updates, rescans them for new data products.
     If any are found, returns them. Skips those in directories whose watchingState is set to Purr.UNWATCHED.

     Returns the result of makeDataProducts() over the accumulated (path, quiet)
     pairs, or None if this Purrer is not attached.
     """
     if not self.attached:
         return
     dprint(5, "starting rescan")
     newstuff = {}
     # this accumulates names of new or changed files. Keys are paths, values are 'quiet' flag.
     # store timestamp of scan
     self.last_scan_timestamp = time.time()
     # go through watched files/directories, check for mtime changes
     # (iterate over a snapshot, since self.watchers may be modified in the loop)
     for path, watcher in list(self.watchers.items()):
         # get list of new files from watcher
         newfiles = watcher.newFiles()
         # None indicates access error, so drop it from watcher set
         if newfiles is None:
             if watcher.survive_deletion:
                 dprintf(5,
                         "access error on %s, but will still be watched\n",
                         watcher.path)
             else:
                 dprintf(2,
                         "access error on %s, will no longer be watched\n",
                         watcher.path)
                 del self.watchers[path]
             # emit "disappearedFile" only once per watcher
             if not watcher.disappeared:
                 self.emit(SIGNAL("disappearedFile"), path)
                 watcher.disappeared = True
             continue
         dprintf(5, "%s: %d new file(s)\n", watcher.path, len(newfiles))
         # if a file has its own watcher, and is independently reported by a directory watcher, skip the directory's
         # version and let the file's watcher report it. Reason for this is that the file watcher may have a more
         # up-to-date timestamp, so we trust it over the dir watcher.
         # NOTE(review): 'p is path' is an identity test -- it relies on the
         # watcher returning the identical path object for its own file;
         # confirm newFiles() preserves object identity, else this should be '=='
         newfiles = [
             p for p in newfiles if p is path or p not in self.watchers
         ]
         # skip files in self._unwatched_paths
         # NOTE(review): _watching_state.get() returns None for unregistered
         # directories; 'None > Purr.UNWATCHED' raises TypeError on Python 3 --
         # confirm every reported file's dirname is always registered
         newfiles = [
             filename for filename in newfiles if self._watching_state.get(
                 os.path.dirname(filename)) > Purr.UNWATCHED
         ]
         # Now go through files and add them to the newstuff dict
         for newfile in newfiles:
             # if quiet flag is explicitly set on watcher, enforce it
             # if not pouncing on directory, also add quietly
             if watcher.quiet or self._watching_state.get(
                     os.path.dirname(newfile)) < Purr.POUNCE:
                 quiet = True
             # else add quietly if file is not in the quiet patterns
             else:
                 quiet = matches_patterns(os.path.basename(newfile),
                                          self._quiet_patterns)
             # add file to list of new products. Since a file may be reported by multiple
             # watchers, make the quiet flag a logical AND of all the quiet flags (i.e. DP will be
             # marked as quiet only if all watchers report it as quiet).
             newstuff[newfile] = quiet and newstuff.get(newfile, True)
             dprintf(
                 4, "%s: new data product, quiet=%d (watcher quiet: %s)\n",
                 newfile, quiet, watcher.quiet)
             # add a watcher for this file to the temp_watchers list. this is used below
             # to detect renamed and deleted files
             self.temp_watchers[newfile] = Purrer.WatchedFile(newfile)
     # now, go through temp_watchers to see if any newly pounced-on files have disappeared
     # (snapshot again: entries are deleted inside the loop)
     for path, watcher in list(self.temp_watchers.items()):
         # get list of new files from watcher
         if watcher.newFiles() is None:
             dprintf(2, "access error on %s, marking as disappeared",
                     watcher.path)
             del self.temp_watchers[path]
             self.emit(SIGNAL("disappearedFile"), path)
     # if we have new data products, send them to the main window
     return self.makeDataProducts(iter(newstuff.items()))
コード例 #25
0
 def addWatchedDirectory(self,
                         dirname,
                         watching=Purr.WATCHED,
                         save_config=True):
     """Starts watching the specified directory for changes.

     'dirname':     directory path; canonized before use.
     'watching':    new watch-state for the directory; None means "leave the
                    current state unchanged" (and defaults to Purr.WATCHED for
                    a directory not yet watched).
     'save_config': passed through to setWatchingState() to control whether
                    the directory configuration is persisted.
     """
     # see if we're already watching this exact directory -- do nothing if so
     dirname = Purr.canonizePath(dirname)
     # do nothing if already watching
     if dirname in self.watched_dirs:
         # NOTE(review): dprint is given a '%s' placeholder but no formatting
         # is applied on this line -- confirm dprint's argument semantics
         dprint(1, "addWatchDirectory(): already watching %s\n", dirname)
         # watching=None means do not change the watch-state
         if watching is None:
             return
     else:
         if watching is None:
             watching = Purr.WATCHED
         # make watcher object
         wdir = Purrer.WatchedDir(dirname,
                                  mtime=self.timestamp,
                                  watch_patterns=self._watch_patterns,
                                  ignore_patterns=self._ignore_patterns)
         # fileset=None indicates error reading directory, so ignore it
         if wdir.fileset is None:
             print(
                 "There was an error reading the directory %s, will stop watching it."
                 % dirname)
             self.setWatchingState(dirname, Purr.REMOVED, save_config=True)
             return
         self.watchers[dirname] = wdir
         self.watched_dirs.append(dirname)
         dprintf(2, "watching directory %s, mtime %s, %d files\n", dirname,
                 time.strftime("%x %X", time.localtime(wdir.mtime)),
                 len(wdir.fileset))
         # find files in this directory matching the watch_patterns, and watch them for changes
         watchset = set()
         for patt in self._watch_patterns:
             watchset.update(fnmatch.filter(wdir.fileset, patt))
         for fname in watchset:
             quiet = matches_patterns(fname, self._quiet_patterns)
             fullname = Purr.canonizePath(os.path.join(dirname, fname))
             # don't replace an existing watcher (it may carry a newer mtime)
             if fullname not in self.watchers:
                 wfile = Purrer.WatchedFile(fullname,
                                            quiet=quiet,
                                            mtime=self.timestamp)
                 self.watchers[fullname] = wfile
                 dprintf(
                     3, "watching file %s, timestamp %s, quiet %d\n",
                     fullname,
                     time.strftime("%x %X",
                                   time.localtime(wfile.mtime)), quiet)
         # find subdirectories matching the subdir_patterns, and watch them for changes
         # NOTE(review): the name 'wdir' is reused here for subdir watchers,
         # shadowing the directory's own watcher above -- harmless now since
         # the outer wdir is not referenced again, but fragile
         for fname in wdir.fileset:
             fullname = Purr.canonizePath(os.path.join(dirname, fname))
             if os.path.isdir(fullname):
                 for desc, dir_patts, canary_patts in self._subdir_patterns:
                     if matches_patterns(fname, dir_patts):
                         quiet = matches_patterns(fname,
                                                  self._quiet_patterns)
                         wdir = Purrer.WatchedSubdir(
                             fullname,
                             canary_patterns=canary_patts,
                             quiet=quiet,
                             mtime=self.timestamp)
                         self.watchers[fullname] = wdir
                         dprintf(
                             3,
                             "watching subdirectory %s/{%s}, timestamp %s, quiet %d\n",
                             fullname, ",".join(canary_patts),
                             time.strftime("%x %X",
                                           time.localtime(wdir.mtime)),
                             quiet)
                         # first matching subdir pattern wins
                         break
     # set state and save config
     self.setWatchingState(dirname, watching, save_config=save_config)
コード例 #26
0
ファイル: LogEntry.py プロジェクト: kernsuite-debian/purr
    def renderIndex(self, relpath="", refresh=0, refresh_index=0):
        """Returns HTML index code for this entry.
        If 'relpath' is empty, renders complete index.html file.
        If 'relpath' is not empty, then index is being included into a top-level log, and
        relpath should be passed to all sub-renderers.
        In this case the entry may make use of its cached_include file, if that is valid.
        If 'refresh' is set to a timestamp, then any subproducts (thumbnails, HTML caches, etc.) older than the timestamp will need to be regenerated.
        If 'refresh_index' is set to a timestamp, then any index files older than the timestamp will need to be regenerated.
        If 'relpath' is empty and 'prev', 'next' and/or 'up' is set, then Prev/Next/Up links will be inserted
        """
        # check if cache can be used
        refresh_index = max(refresh, refresh_index)
        dprintf(2, "%s: rendering HTML index with relpath='%s', refresh=%s refresh_index=%s\n", self.pathname, relpath,
                time.strftime("%x %X", time.localtime(refresh)),
                time.strftime("%x %X", time.localtime(refresh_index)))
        if relpath and self.cached_include_valid:
            try:
                if os.path.getmtime(self.cached_include) >= refresh_index:
                    dprintf(2, "using include cache %s\n", self.cached_include)
                    return open(self.cached_include).read()
                else:
                    dprintf(2, "include cache %s out of date, will regenerate\n", self.cached_include)
                    self.cached_include_valid = False
            except:
                print("Error reading cached include code from %s, will regenerate" % self.cached_include)
                if verbosity.get_verbose() > 0:
                    dprint(1, "Error traceback follows:")
                    traceback.print_exc()
                self.cached_include_valid = False
        # form up attributes for % operator
        attrs = dict(self.__dict__)
        attrs['timestr'] = time.strftime("%x %X", time.localtime(self.timestamp))
        attrs['relpath'] = relpath
        html = ""
        # replace title and comments for ignored entries
        if self.ignore:
            attrs['title'] = "This is not a real log entry"
            # NOTE(review): attrs['comment'] is set here but the comments
            # section below renders self.comment, not attrs -- confirm intent
            attrs['comment'] = """This entry was saved by PURR because the user
      chose to ignore and/or banish some data products. PURR has stored this
      information here for its own internal and highly nefarious purposes.
      This entry will not appear in the log."""
        # replace < and > in title and comments
        attrs['title'] = attrs['title'].replace("<", "&lt;").replace(">", "&gt;")
        # write header if asked
        if not relpath:
            icon = Purr.RenderIndex.renderIcon(24, "..")
            html += """<HTML><BODY>
      <TITLE>%(title)s</TITLE>""" % attrs
            if self._prev_link or self._next_link or self._up_link:
                html += """<DIV ALIGN=right><P>%s %s %s</P></DIV>""" % (
                    (self._prev_link and "<A HREF=\"%s\">&lt;&lt;Previous</A>" % self._prev_link) or "",
                    (self._up_link and "<A HREF=\"%s\">Up</A>" % self._up_link) or "",
                    (self._next_link and "<A HREF=\"%s\">Next&gt;&gt;</A>" % self._next_link) or ""
                )
            html += ("<H2>" + icon + """ <A CLASS="TITLE" TIMESTAMP=%(timestamp)d>%(title)s</A></H2>""") % attrs
        else:
            icon = Purr.RenderIndex.renderIcon(24)
            # only the trailing literal is %-formatted (icon may contain '%'
            # characters), so the first literal uses a plain '%'. Previously
            # the unformatted '100%%' was emitted verbatim due to operator
            # precedence ('%' binding tighter than '+').
            html += """
        <HR WIDTH=100%>
        <H2>""" + icon + (""" %(title)s</H2>""" % attrs)
        # write comments
        html += """
        <DIV ALIGN=right><P><SMALL>Logged on %(timestr)s</SMALL></P></DIV>\n

        <A CLASS="COMMENTS">\n""" % attrs
        # add comments, escaping markup but preserving explicit <BR> tags
        for cmt in self.comment.split("\n"):
            cmt = cmt.replace("<", "&lt;").replace(">", "&gt;").replace("&lt;BR&gt;", "<BR>")
            html += """      <P>%s</P>\n""" % cmt
        html += """    </A>\n"""
        # add data products
        if self.dps:
            have_real_dps = bool([dp for dp in self.dps if not dp.ignored])
            if have_real_dps:
                html += """
        <H3>Data products</H3>
        <TABLE BORDER=1 FRAME=box RULES=all CELLPADDING=5>\n"""
            for dp in self.dps:
                dpattrs = dict(dp.__dict__)
                dpattrs['comment'] = dpattrs['comment'].replace("<", "&lt;"). \
                    replace(">", "&gt;").replace('"', "''")
                # if generating complete index, write empty anchor for each DP
                if not relpath:
                    if dp.ignored:
                        html += """
            <A CLASS="DP" SRC="%(sourcepath)s" POLICY="%(policy)s" COMMENT="%(comment)s"></A>\n""" % dpattrs
                    # write normal anchor for normal products
                    else:
                        dpattrs['relpath'] = relpath
                        dpattrs['basename'] = os.path.basename(dp.filename)
                        html += """
            <A CLASS="DP" FILENAME="%(filename)s" SRC="%(sourcepath)s" POLICY="%(policy)s" QUIET=%(quiet)d TIMESTAMP=%(timestamp).6f RENDER="%(render)s" COMMENT="%(comment)s"></A>\n""" % dpattrs
                # render a table row
                if not dp.ignored:
                    renderer = Purr.Render.makeRenderer(dp.render, dp, refresh=refresh)
                    html += Purr.Render.renderInTable(renderer, relpath)
            if have_real_dps:
                html += """
        </TABLE>"""
        # write footer
        if not relpath:
            html += "</BODY></HTML>\n"
        else:
            # now, write to include cache, if being included
            open(self.cached_include, 'w').write(html)
            self.cached_include_valid = True
        return html
コード例 #27
0
 def _attach(self, purrlog, watchdirs=None):
     """Attaches Purr to a purrlog directory, and loads content.
     Returns False if nothing new has been loaded (because directory is the same),
     or True otherwise.

     Raises Purrer.LockFailError if the lock file cannot be opened, and
     Purrer.LockedError if another process already holds the lock.
     """
     purrlog = os.path.abspath(purrlog)
     dprint(1, "attaching to purrlog", purrlog)
     self.logdir = purrlog
     self.indexfile = os.path.join(self.logdir, "index.html")
     self.logtitle = "Unnamed log"
     self.timestamp = self.last_scan_timestamp = time.time()
     self._initIndexDir()
     # reset internal state
     self.ignorelistfile = None
     self.autopounce = False
     self.watched_dirs = []
     self.entries = []
     self._default_dp_props = {}
     self.watchers = {}
     self.temp_watchers = {}
     self.attached = False
     self._watching_state = {}
     # check that we hold a lock on the directory
     self.lockfile = os.path.join(self.logdir, ".purrlock")
     # try to open lock file for r/w
     try:
         self.lockfile_fd = os.open(self.lockfile, os.O_RDWR | os.O_CREAT)
     except:
         raise Purrer.LockFailError(
             "failed to open lock file %s for writing" % self.lockfile)
     # try to acquire an exclusive, non-blocking lock on the lock file
     try:
         fcntl.lockf(self.lockfile_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
     except:
         # someone else holds the lock: report their ID from the lock file
         other_lock = os.fdopen(self.lockfile_fd, 'r').read()
         self.lockfile_fd = None
         raise Purrer.LockedError(other_lock)
     # got lock, write our ID to the lock file
     global _lockstring
     try:
         self.lockfile_fobj = os.fdopen(self.lockfile_fd, 'w')
         self.lockfile_fobj.write(_lockstring)
         self.lockfile_fobj.flush()
         os.fsync(self.lockfile_fd)
     except:
         raise
     #      raise Purrer.LockFailError("cannot write to lock file %s"%self.lockfile)
     # load log state if log directory already exists
     if os.path.exists(self.logdir):
         _busy = Purr.BusyIndicator()
         if os.path.exists(self.indexfile):
             try:
                 parser = Purr.Parsers.LogIndexParser()
                 for line in open(self.indexfile):
                     parser.feed(line)
                 self.logtitle = parser.title or self.logtitle
                 self.timestamp = parser.timestamp or self.timestamp
                 dprintf(
                     2, "attached log '%s', timestamp %s\n", self.logtitle,
                     time.strftime("%x %X", time.localtime(self.timestamp)))
             except:
                 traceback.print_exc()
                 print("Error parsing %s, reverting to defaults" %
                       self.indexfile)
         # load log entries
         entries = []
         for fname in os.listdir(self.logdir):
             pathname = os.path.join(self.logdir, fname)
             if Purr.LogEntry.isValidPathname(pathname):
                 try:
                     entry = Purr.LogEntry(load=pathname)
                     dprint(2, "loaded log entry", pathname)
                 except:
                     print("Error loading entry %s, skipping" % fname)
                     traceback.print_exc()
                     continue
                 entries.append(entry)
             else:
                 dprint(2, fname, "is not a valid Purr entry")
         # sort log entries by timestamp. The old two-argument cmp comparator
         # does not exist on Python 3; use a sort key instead.
         entries.sort(key=lambda entry: entry.timestamp)
         self.setLogEntries(entries, save=False)
         # update own timestamp
         if entries:
             self.timestamp = max(self.timestamp, entries[-1].timestamp)
     # else logfile doesn't exist, create it
     else:
         self._initIndexDir()
     # load configuration if it exists
     # init config file
     self.dirconfig = configparser.RawConfigParser()
     self.dirconfigfile = os.path.join(self.logdir, "dirconfig")
     if os.path.exists(self.dirconfigfile):
         try:
             self.dirconfig.read(self.dirconfigfile)
         except:
             print("Error loading config file %s" % self.dirconfigfile)
             traceback.print_exc()
         # load directory configuration
         for dirname in self.dirconfig.sections():
             try:
                 watching = self.dirconfig.getint(dirname, "watching")
             except:
                 watching = Purr.WATCHED
             dirname = os.path.expanduser(dirname)
             self.addWatchedDirectory(dirname, watching, save_config=False)
     # start watching the specified directories
     for name in (watchdirs or []):
         self.addWatchedDirectory(name, watching=None)
     # Finally, go through list of ignored files and mark their watchers accordingly.
     # The ignorelist is a list of lines of the form "timestamp filename", giving the timestamp when a
     # file was last "ignored" by the purrlog user.
     self.ignorelistfile = os.path.join(self.logdir, "ignorelist")
     if os.path.exists(self.ignorelistfile):
         # read lines from file, ignore exceptions
         ignores = {}
         try:
             for line in open(self.ignorelistfile).readlines():
                 timestamp, policy, filename = line.strip().split(" ", 2)
                 # update dictionary with latest timestamp
                 ignores[filename] = int(timestamp), policy
         except:
             print("Error reading %s" % self.ignorelistfile)
             traceback.print_exc()
         # now scan all listed files, and make sure their watchers' mtime is no older than the given
         # last-ignore-timestamp. This ensures that we don't pounce on these files after restarting purr.
         for filename, (timestamp, policy) in ignores.items():
             watcher = self.watchers.get(filename, None)
             if watcher:
                 watcher.mtime = max(watcher.mtime, timestamp)
     # init complete
     self.attached = True
     return True
コード例 #28
0
 def save(self,
          dirname=None,
          refresh=0,
          refresh_index=True,
          emit_message=True):
     """Saves entry in the given directory. Data products will be copied over if not
     residing in that directory.
     'refresh' is a timestamp, passed to renderIndex(), causing all data products OLDER than the specified time to be regenerated.
     'refresh_index', if true, causes index files to be re-rendered unconditionally
     'emit_message' is accepted for interface compatibility; it is not referenced in this body.
     """
     # nothing to do if entry is unchanged and no forced refresh requested
     if not refresh and not self.updated:
         return
     timestr = time.strftime("%Y%m%d-%H%M%S",
                             time.localtime(self.timestamp))
     Purr.progressMessage("Rendering entry for %s" % timestr)
     # work out the entry's output directory: explicit dirname wins, else reuse stored pathname
     if dirname:
         self.pathname = pathname = os.path.join(
             dirname,
             "%s-%s" % (("ignore" if self.ignore else "entry"), timestr))
     elif not self.pathname:
         raise ValueError("Cannot save entry: pathname not specified")
     else:
         pathname = self.pathname
     # set timestamp
     if not self.timestamp:
         self.timestamp = int(time.time())
     # get canonized path to output directory
     pathname = Purr.canonizePath(pathname)
     if not os.path.exists(pathname):
         os.mkdir(pathname)
     # copy data products as needed
     # (removed unused 'devnum = os.stat(pathname).st_dev' -- value was never read)
     dprintf(2, "saving entry %s, %d data products\n", pathname,
             len(self.dps))
     dps = []
     for dp in self.dps:
         # if archived, this indicates a previously saved data product, so ignore it
         # if ignored, no need to save the DP -- but keep it in list
         if dp.archived or dp.ignored:
             dprintf(3, "dp %s is archived or ignored, skipping\n",
                     dp.sourcepath)
             dps.append(dp)
             continue
         # file missing for some reason (perhaps it got removed on us?) skip data product entirely
         if not os.path.exists(dp.sourcepath):
             dprintf(2, "data product %s missing, ignoring\n",
                     dp.sourcepath)
             continue
         Purr.progressMessage("archiving %s" % dp.filename, sub=True)
         # get normalized source and destination paths
         dprintf(2, "data product: %s, rename %s, policy %s\n",
                 dp.sourcepath, dp.filename, dp.policy)
         sourcepath = Purr.canonizePath(dp.sourcepath)
         destname = dp.fullpath = os.path.join(pathname, dp.filename)
         dprintf(2, "data product: %s -> %s\n", sourcepath, destname)
         # does the destination product already exist? skip if same file, else remove
         if os.path.exists(destname):
             if os.path.samefile(destname, sourcepath):
                 dprintf(2, "same file, skipping\n")
                 dp.timestamp = os.path.getmtime(destname)
                 dps.append(dp)
                 continue
             # NOTE(review): paths containing single quotes will break these
             # shell commands -- consider shlex.quote/subprocess
             if os.system("/bin/rm -fr '%s'" % destname):
                 print("Error removing %s, which is in the way of %s" %
                       (destname, sourcepath))
                 print("This data product is not saved.")
                 continue
         # for directories, compress with tar
         if os.path.isdir(sourcepath):
             sourcepath = sourcepath.rstrip('/')
             if dp.policy == "copy" or dp.policy.startswith("move"):
                 dprintf(2, "archiving to tgz\n")
                 if os.system("tar zcf '%s' -C '%s' '%s'" %
                              (destname, os.path.dirname(sourcepath),
                               os.path.basename(sourcepath))):
                     print("Error archiving %s to %s" %
                           (sourcepath, destname))
                     print("This data product is not saved.")
                     continue
                 # a "move" policy deletes the original after archiving
                 if dp.policy.startswith("move"):
                     os.system("/bin/rm -fr '%s'" % sourcepath)
         # else just a file
         else:
             # now copy/move it over
             if dp.policy == "copy":
                 dprintf(2, "copying\n")
                 if _copy_update(sourcepath, destname):
                     print("Error copying %s to %s" %
                           (sourcepath, destname))
                     print("This data product is not saved.")
                     continue
             elif dp.policy.startswith('move'):
                 if _move_update(sourcepath, destname):
                     print("Error moving %s to %s" % (sourcepath, destname))
                     print("This data product is not saved.")
                     continue
         # success, set timestamp and append
         dp.timestamp = os.path.getmtime(destname)
         dp.archived = True
         dps.append(dp)
     # reset list of data products (only the successfully saved/kept ones)
     self.dps = dps
     # now write out content; include cache is invalidated since content changed
     self.cached_include = os.path.join(pathname, 'index.include.html')
     self.cached_include_valid = False
     self.index_file = os.path.join(pathname, "index.html")
     self.generateIndex(refresh=refresh,
                        refresh_index=refresh_index and time.time())
     self.updated = False
コード例 #29
0
    def renderIndex(self, relpath="", refresh=0, refresh_index=0):
        """Returns HTML index code for this entry.
        If 'relpath' is empty, renders complete index.html file.
        If 'relpath' is not empty, then index is being included into a top-level log, and
        relpath should be passed to all sub-renderers.
        In this case the entry may make use of its cached_include file, if that is valid.
        If 'refresh' is set to a timestamp, then any subproducts (thumbnails, HTML caches, etc.) older than the timestamp will need to be regenerated.
        If 'refresh_index' is set to a timestamp, then any index files older than the timestamp will need to be regenerated.
        If 'relpath' is empty and 'prev', 'next' and/or 'up' is set, then Prev/Next/Up links will be inserted
        """
        # a subproduct refresh implies an index refresh, so the index threshold
        # can never be older than the refresh threshold
        refresh_index = max(refresh, refresh_index)
        dprintf(
            2,
            "%s: rendering HTML index with relpath='%s', refresh=%s refresh_index=%s\n",
            self.pathname, relpath,
            time.strftime("%x %X", time.localtime(refresh)),
            time.strftime("%x %X", time.localtime(refresh_index)))
        # when included into a top-level log, try to reuse the cached include file
        if relpath and self.cached_include_valid:
            try:
                if os.path.getmtime(self.cached_include) >= refresh_index:
                    dprintf(2, "using include cache %s\n", self.cached_include)
                    # context manager so the handle is closed promptly (was a
                    # bare open().read() that leaked the file object)
                    with open(self.cached_include) as f:
                        return f.read()
                else:
                    dprintf(2,
                            "include cache %s out of date, will regenerate\n",
                            self.cached_include)
                    self.cached_include_valid = False
            except Exception:
                # was a bare 'except:'; narrowed so KeyboardInterrupt/SystemExit
                # are not swallowed -- cache failures just force a regenerate
                print(
                    "Error reading cached include code from %s, will regenerate"
                    % self.cached_include)
                if verbosity.get_verbose() > 0:
                    dprint(1, "Error traceback follows:")
                    traceback.print_exc()
                self.cached_include_valid = False
        # form up attributes for % operator
        attrs = dict(self.__dict__)
        attrs['timestr'] = time.strftime("%x %X",
                                         time.localtime(self.timestamp))
        attrs['relpath'] = relpath
        html = ""
        # replace title and comments for ignored entries
        # (fixed typos in the user-visible text: "opwn" -> "own", "is will" -> "will")
        if self.ignore:
            attrs['title'] = "This is not a real log entry"
            attrs['comment'] = """This entry was saved by PURR because the user
      chose to ignore and/or banish some data products. PURR has stored this
      information here for its own internal and highly nefarious purposes.
      This entry will not appear in the log."""
        # escape < and > in title so it renders as text, not markup
        attrs['title'] = attrs['title'].replace("<",
                                                "&lt;").replace(">", "&gt;")
        # write header if asked
        if not relpath:
            icon = Purr.RenderIndex.renderIcon(24, "..")
            html += """<HTML><BODY>
      <TITLE>%(title)s</TITLE>""" % attrs
            if self._prev_link or self._next_link or self._up_link:
                html += """<DIV ALIGN=right><P>%s %s %s</P></DIV>""" % (
                    (self._prev_link and "<A HREF=\"%s\">&lt;&lt;Previous</A>"
                     % self._prev_link) or "",
                    (self._up_link and "<A HREF=\"%s\">Up</A>" % self._up_link)
                    or "",
                    (self._next_link and "<A HREF=\"%s\">Next&gt;&gt;</A>" %
                     self._next_link) or "")
            html += (
                "<H2>" + icon +
                """ <A CLASS="TITLE" TIMESTAMP=%(timestamp)d>%(title)s</A></H2>"""
            ) % attrs
        else:
            icon = Purr.RenderIndex.renderIcon(24)
            # NB: '%' binds tighter than '+', so only the last string below is
            # %-formatted; the first string needs a literal '%', not '%%'
            # (the old '100%%' was emitted verbatim into the HTML)
            html += """
        <HR WIDTH=100%>
        <H2>""" + icon + """ %(title)s</H2>""" % attrs
        # write comments
        html += """
        <DIV ALIGN=right><P><SMALL>Logged on %(timestr)s</SMALL></P></DIV>\n

        <A CLASS="COMMENTS">\n""" % attrs
        # add comments: escape < and >, but restore explicit <BR> tags
        for cmt in self.comment.split("\n"):
            cmt = cmt.replace("<", "&lt;").replace(">", "&gt;").replace(
                "&lt;BR&gt;", "<BR>")
            html += """      <P>%s</P>\n""" % cmt
        html += """    </A>\n"""
        # add data products
        if self.dps:
            # only open a table if at least one DP is not ignored
            have_real_dps = bool([dp for dp in self.dps if not dp.ignored])
            if have_real_dps:
                html += """
        <H3>Data products</H3>
        <TABLE BORDER=1 FRAME=box RULES=all CELLPADDING=5>\n"""
            for dp in self.dps:
                dpattrs = dict(dp.__dict__)
                dpattrs['comment'] = dpattrs['comment'].replace("<", "&lt;"). \
                    replace(">", "&gt;").replace('"', "''")
                # if generating complete index, write empty anchor for each DP
                if not relpath:
                    if dp.ignored:
                        html += """
            <A CLASS="DP" SRC="%(sourcepath)s" POLICY="%(policy)s" COMMENT="%(comment)s"></A>\n""" % dpattrs
                    # write normal anchor for normal products
                    else:
                        dpattrs['relpath'] = relpath
                        dpattrs['basename'] = os.path.basename(dp.filename)
                        html += """
            <A CLASS="DP" FILENAME="%(filename)s" SRC="%(sourcepath)s" POLICY="%(policy)s" QUIET=%(quiet)d TIMESTAMP=%(timestamp).6f RENDER="%(render)s" COMMENT="%(comment)s"></A>\n""" % dpattrs
                # render a table row
                if not dp.ignored:
                    renderer = Purr.Render.makeRenderer(dp.render,
                                                        dp,
                                                        refresh=refresh)
                    html += Purr.Render.renderInTable(renderer, relpath)
            if have_real_dps:
                html += """
        </TABLE>"""
        # write footer
        if not relpath:
            html += "</BODY></HTML>\n"
        else:
            # being included: refresh the include cache for next time
            with open(self.cached_include, 'w') as f:
                f.write(html)
            self.cached_include_valid = True
        return html
コード例 #30
0
ファイル: Purrer.py プロジェクト: kernsuite-debian/purr
 def _attach(self, purrlog, watchdirs=None):
     """Attaches Purr to a purrlog directory, and loads content.

     purrlog:   path to the purrlog directory.
     watchdirs: optional extra directories to watch, in addition to those
                listed in the log's own dirconfig file.

     Returns True once attached.  (NOTE(review): the original docstring
     claimed False when nothing new is loaded, but no code path here
     returns False -- confirm against callers.)
     Raises Purrer.LockFailError/Purrer.LockedError if the directory lock
     cannot be acquired.
     """
     purrlog = os.path.abspath(purrlog)
     dprint(1, "attaching to purrlog", purrlog)
     self.logdir = purrlog
     self.indexfile = os.path.join(self.logdir, "index.html")
     self.logtitle = "Unnamed log"
     self.timestamp = self.last_scan_timestamp = time.time()
     self._initIndexDir()
     # reset internal state
     self.ignorelistfile = None
     self.autopounce = False
     self.watched_dirs = []
     self.entries = []
     self._default_dp_props = {}
     self.watchers = {}
     self.temp_watchers = {}
     self.attached = False
     self._watching_state = {}
     # check that we hold a lock on the directory
     self.lockfile = os.path.join(self.logdir, ".purrlock")
     # try to open lock file for r/w (was a bare except; os.open raises OSError)
     try:
         self.lockfile_fd = os.open(self.lockfile, os.O_RDWR | os.O_CREAT)
     except OSError:
         raise Purrer.LockFailError("failed to open lock file %s for writing" % self.lockfile)
     # try to acquire lock on the lock file (fcntl.lockf raises OSError on contention)
     try:
         fcntl.lockf(self.lockfile_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
     except OSError:
         # someone else holds the lock: report their ID from the lock file
         other_lock = os.fdopen(self.lockfile_fd, 'r').read()
         self.lockfile_fd = None
         raise Purrer.LockedError(other_lock)
     # got lock: write our ID to the lock file so a competing purr can report us
     # (removed a try/except that only re-raised -- behavior is unchanged)
     global _lockstring
     self.lockfile_fobj = os.fdopen(self.lockfile_fd, 'w')
     self.lockfile_fobj.write(_lockstring)
     self.lockfile_fobj.flush()
     os.fsync(self.lockfile_fd)
     # load log state if log directory already exists
     if os.path.exists(self.logdir):
         # kept alive for the duration of the scan (busy indicator)
         _busy = Purr.BusyIndicator()
         if os.path.exists(self.indexfile):
             try:
                 parser = Purr.Parsers.LogIndexParser()
                 # context manager so the index file handle is closed promptly
                 with open(self.indexfile) as indexf:
                     for line in indexf:
                         parser.feed(line)
                 self.logtitle = parser.title or self.logtitle
                 self.timestamp = parser.timestamp or self.timestamp
                 dprintf(2, "attached log '%s', timestamp %s\n",
                         self.logtitle, time.strftime("%x %X", time.localtime(self.timestamp)))
             except Exception:
                 traceback.print_exc()
                 print("Error parsing %s, reverting to defaults" % self.indexfile)
         # load log entries
         entries = []
         for fname in os.listdir(self.logdir):
             pathname = os.path.join(self.logdir, fname)
             if Purr.LogEntry.isValidPathname(pathname):
                 try:
                     entry = Purr.LogEntry(load=pathname)
                     dprint(2, "loaded log entry", pathname)
                 except Exception:
                     print("Error loading entry %s, skipping" % fname)
                     traceback.print_exc()
                     continue
                 entries.append(entry)
             else:
                 dprint(2, fname, "is not a valid Purr entry")
         # sort log entries by timestamp
         # (fixed: the old Python 2 cmp-style sort(lambda a,b: cmp(...)) is a
         # TypeError under Python 3 -- list.sort only accepts a 'key' keyword)
         entries.sort(key=lambda entry: entry.timestamp)
         self.setLogEntries(entries, save=False)
         # update own timestamp to that of the newest entry
         if entries:
             self.timestamp = max(self.timestamp, entries[-1].timestamp)
     # else logfile doesn't exist, create it
     else:
         self._initIndexDir()
     # load configuration if it exists
     # init config file
     self.dirconfig = configparser.RawConfigParser()
     self.dirconfigfile = os.path.join(self.logdir, "dirconfig")
     if os.path.exists(self.dirconfigfile):
         try:
             self.dirconfig.read(self.dirconfigfile)
         except Exception:
             print("Error loading config file %s" % self.dirconfigfile)
             traceback.print_exc()
         # load directory configuration
         for dirname in self.dirconfig.sections():
             try:
                 watching = self.dirconfig.getint(dirname, "watching")
             except Exception:
                 # missing/bad "watching" value: fall back to default policy
                 watching = Purr.WATCHED
             dirname = os.path.expanduser(dirname)
             self.addWatchedDirectory(dirname, watching, save_config=False)
     # start watching the specified directories
     for name in (watchdirs or []):
         self.addWatchedDirectory(name, watching=None)
     # Finally, go through list of ignored files and mark their watchers accordingly.
     # The ignorelist is a list of lines of the form "timestamp policy filename", giving the
     # timestamp when a file was last "ignored" by the purrlog user.
     self.ignorelistfile = os.path.join(self.logdir, "ignorelist")
     if os.path.exists(self.ignorelistfile):
         # read lines from file, ignore exceptions
         ignores = {}
         try:
             with open(self.ignorelistfile) as ignoref:
                 for line in ignoref:
                     timestamp, policy, filename = line.strip().split(" ", 2)
                     # update dictionary with latest timestamp
                     ignores[filename] = int(timestamp), policy
         except Exception:
             print("Error reading %s" % self.ignorelistfile)
             traceback.print_exc()
         # now scan all listed files, and make sure their watchers' mtime is no older than the given
         # last-ignore-timestamp. This ensures that we don't pounce on these files after restarting purr.
         for filename, (timestamp, policy) in ignores.items():
             watcher = self.watchers.get(filename, None)
             if watcher:
                 watcher.mtime = max(watcher.mtime, timestamp)
     # init complete
     self.attached = True
     return True