Example #1
0
 def addWatchedDirectory(self, dirname, watching=Purr.WATCHED, save_config=True):
     """Starts watching the specified directories for changes"""
     # see if we're alredy watching this exact set of directories -- do nothing if so
     dirname = Purr.canonizePath(dirname)
     # do nothing if already watching
     if dirname in self.watched_dirs:
         dprint(1, "addWatchedDirectory(): already watching %s\n", dirname)
         # watching=None means do not change the watch-state
         if watching is None:
             return
     else:
         if watching is None:
             watching = Purr.WATCHED
         # make watcher object
         wdir = Purrer.WatchedDir(dirname, mtime=self.timestamp,
                                  watch_patterns=self._watch_patterns, ignore_patterns=self._ignore_patterns)
         # fileset=None indicates error reading directory, so ignore it
         if wdir.fileset is None:
             print("There was an error reading the directory %s, will stop watching it." % dirname)
             self.setWatchingState(dirname, Purr.REMOVED, save_config=True)
             return
         self.watchers[dirname] = wdir
         self.watched_dirs.append(dirname)
         dprintf(2, "watching directory %s, mtime %s, %d files\n",
                 dirname, time.strftime("%x %X", time.localtime(wdir.mtime)), len(wdir.fileset))
         # find files in this directory matching the watch_patterns, and watch them for changes
         watchset = set()
         for patt in self._watch_patterns:
             watchset.update(fnmatch.filter(wdir.fileset, patt))
         for fname in watchset:
             quiet = matches_patterns(fname, self._quiet_patterns)
             fullname = Purr.canonizePath(os.path.join(dirname, fname))
             if fullname not in self.watchers:
                 wfile = Purrer.WatchedFile(fullname, quiet=quiet, mtime=self.timestamp)
                 self.watchers[fullname] = wfile
                 dprintf(3, "watching file %s, timestamp %s, quiet %d\n",
                         fullname, time.strftime("%x %X", time.localtime(wfile.mtime)), quiet)
         # find subdirectories matching the subdir_patterns, and watch them for changes
         for fname in wdir.fileset:
             fullname = Purr.canonizePath(os.path.join(dirname, fname))
             if os.path.isdir(fullname):
                 for desc, dir_patts, canary_patts in self._subdir_patterns:
                     if matches_patterns(fname, dir_patts):
                         quiet = matches_patterns(fname, self._quiet_patterns)
                         wdir = Purrer.WatchedSubdir(fullname, canary_patterns=canary_patts, quiet=quiet,
                                                     mtime=self.timestamp)
                         self.watchers[fullname] = wdir
                         dprintf(3, "watching subdirectory %s/{%s}, timestamp %s, quiet %d\n",
                                 fullname, ",".join(canary_patts),
                                 time.strftime("%x %X", time.localtime(wdir.mtime)), quiet)
                         break
     # set state and save config
     self.setWatchingState(dirname, watching, save_config=save_config)
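A minimal usage sketch for the method above, assuming "purrer" is an already-constructed Purrer instance and the directory path is a placeholder:
 # Assumed: "purrer" is an existing Purrer instance; the path is a placeholder.
 purrer.addWatchedDirectory("/data/run1", watching=Purr.WATCHED)
 # For a directory that is already being watched, watching=None leaves its
 # watch-state unchanged and the call returns early.
 purrer.addWatchedDirectory("/data/run1", watching=None)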
Example #2
0
 def makeDataProducts(self, files, unbanish=False, unignore=False):
     """makes a list of DPs from a list of (filename,quiet) pairs.
     If unbanish is False, DPs with a default "banish" policy will be skipped.
     Symlinks will be resolved, and non-unique filenames removed from list.
     """
     paths = set()
     dps = []
     for filename, quiet in files:
         filename = filename.rstrip('/')
         sourcepath = Purr.canonizePath(filename)
         if sourcepath not in paths:
             paths.add(sourcepath)
             filename = os.path.basename(filename)
             policy, filename, comment = self._default_dp_props.get(
                 filename, ("copy", filename, ""))
             dprintf(4, "%s: default policy is %s,%s,%s\n", sourcepath,
                     policy, filename, comment)
             if policy == "banish":
                 if unbanish:
                     policy = "copy"
                 else:
                     continue
             if unignore and policy == "ignore":
                 policy = "copy"
             dps.append(
                 Purr.DataProduct(filename=filename,
                                  sourcepath=sourcepath,
                                  policy=policy,
                                  comment=comment,
                                  quiet=quiet))
     return sorted(dps, key=lambda dp: dp.filename)
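A minimal usage sketch for the method above, assuming "purrer" is an existing instance of the class that defines makeDataProducts and the paths are placeholders; the input is a list of (filename, quiet) pairs as the docstring describes:
 # Assumed: "purrer" is an existing instance; the paths are placeholders.
 files = [("/data/run1/image.fits", False),   # not quiet: updates may pop up windows
          ("/data/run1/run.log", True)]       # quiet: watched silently
 dps = purrer.makeDataProducts(files, unbanish=False, unignore=False)
 for dp in dps:
     print(dp.filename, dp.policy, dp.comment)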
Example #3
0
 def makeDataProducts(self, files, unbanish=False, unignore=False):
     """makes a list of DPs from a list of (filename,quiet) pairs.
     If unbanish is False, DPs with a default "banish" policy will be skipped.
     Symlinks will be resolved, and non-unique filenames removed from list.
     """
     paths = set()
     dps = []
     for filename, quiet in files:
         filename = filename.rstrip('/')
         sourcepath = Purr.canonizePath(filename)
         if sourcepath not in paths:
             paths.add(sourcepath)
             filename = os.path.basename(filename)
             policy, filename, comment = self._default_dp_props.get(filename, ("copy", filename, ""))
             dprintf(4, "%s: default policy is %s,%s,%s\n", sourcepath, policy, filename, comment)
             if policy == "banish":
                 if unbanish:
                     policy = "copy"
                 else:
                     continue
             if unignore and policy == "ignore":
                 policy = "copy"
             dps.append(Purr.DataProduct(filename=filename, sourcepath=sourcepath,
                                         policy=policy, comment=comment, quiet=quiet))
     return sorted(dps, key=lambda dp: dp.filename)
Example #4
0
 def __init__(self,
              filename=None,
              sourcepath=None,
              fullpath=None,
              policy="copy",
              comment="",
              timestamp=None,
              render=None,
              quiet=False,
              archived=False):
     # This is the absolute pathname to the original data product
     self.sourcepath = Purr.canonizePath(sourcepath)
     # Base filename (w/o path) of data product within the log storage area.
     # Products may be renamed when they are moved or copied over to the log.
     self.filename = filename or (sourcepath
                                  and os.path.basename(sourcepath))
     # Full path to the DP within the log storage area.
     # This is None until a DP has been saved.
     self.fullpath = fullpath
     # Handling policy for DP: "copy","move","ignore", etc.
     self.policy = policy
     # Comment associated with DP
     self.comment = comment
     # Once a DP has been saved, this is the timestamp of data product at time of copy
     self.timestamp = timestamp
     # Name of renderer used to render this DP.
     self.render = render
     # if True, DP is watched quietly (i.e. Purr does not pop up windows on update)
     self.quiet = quiet
     # if True, DP has already been archived. This is False for new DPs until they're saved.
     self.archived = archived
     # if True, DP is ignored (policy is "ignore" or "banish")
     # note that policy should not be changed after a DP has been created
     self.ignored = policy in ("ignore", "banish")
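A minimal construction sketch for the class above, assuming the Purr package defining DataProduct is importable as in the other examples; the path and comment are placeholders:
 # Assumed: Purr.DataProduct as defined above; path and comment are placeholders.
 dp = Purr.DataProduct(filename="image.fits",
                       sourcepath="/data/run1/image.fits",
                       policy="copy",
                       comment="dirty image of run 1")
 assert dp.fullpath is None    # not yet saved to the log storage area
 assert not dp.archived        # new DPs start out unarchived
 assert not dp.ignored         # "copy" is not an ignoring policy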
Example #5
0
 def __init__(self, filename=None, sourcepath=None, fullpath=None,
              policy="copy", comment="",
              timestamp=None, render=None,
              quiet=False, archived=False):
     # This is the absolute pathname to the original data product
     self.sourcepath = Purr.canonizePath(sourcepath)
     # Base filename (w/o path) of data product within the log storage area.
     # Products may be renamed when they are moved or copied over to the log.
     self.filename = filename or (sourcepath and os.path.basename(sourcepath))
     # Full path to the DP within the log storage area.
     # This is None until a DP has been saved.
     self.fullpath = fullpath
     # Handling policy for DP: "copy","move","ignore", etc.
     self.policy = policy
     # Comment associated with DP
     self.comment = comment
     # Once a DP has been saved, this is the timestamp of data product at time of copy
     self.timestamp = timestamp
     # Name of renderer used to render this DP.
     self.render = render
     # if True, DP is watched quietly (i.e. Purr does not pop up windows on update)
     self.quiet = quiet
     # if True, DP has already been archived. This is False for new DPs until they're saved.
     self.archived = archived
     # if True, DP is ignored (policy is "ignore" or "banish")
     # note that policy should not be changed after a DP has been created
     self.ignored = policy in ("ignore", "banish")
Example #6
0
 def addWatchedDirectory(self,
                         dirname,
                         watching=Purr.WATCHED,
                         save_config=True):
     """Starts watching the specified directories for changes"""
     # see if we're alredy watching this exact set of directories -- do nothing if so
     dirname = Purr.canonizePath(dirname)
     # do nothing if already watching
     if dirname in self.watched_dirs:
         dprint(1, "addWatchedDirectory(): already watching %s\n", dirname)
         # watching=None means do not change the watch-state
         if watching is None:
             return
     else:
         if watching is None:
             watching = Purr.WATCHED
         # make watcher object
         wdir = Purrer.WatchedDir(dirname,
                                  mtime=self.timestamp,
                                  watch_patterns=self._watch_patterns,
                                  ignore_patterns=self._ignore_patterns)
         # fileset=None indicates error reading directory, so ignore it
         if wdir.fileset is None:
             print(
                 "There was an error reading the directory %s, will stop watching it."
                 % dirname)
             self.setWatchingState(dirname, Purr.REMOVED, save_config=True)
             return
         self.watchers[dirname] = wdir
         self.watched_dirs.append(dirname)
         dprintf(2, "watching directory %s, mtime %s, %d files\n", dirname,
                 time.strftime("%x %X", time.localtime(wdir.mtime)),
                 len(wdir.fileset))
         # find files in this directory matching the watch_patterns, and watch them for changes
         watchset = set()
         for patt in self._watch_patterns:
             watchset.update(fnmatch.filter(wdir.fileset, patt))
         for fname in watchset:
             quiet = matches_patterns(fname, self._quiet_patterns)
             fullname = Purr.canonizePath(os.path.join(dirname, fname))
             if fullname not in self.watchers:
                 wfile = Purrer.WatchedFile(fullname,
                                            quiet=quiet,
                                            mtime=self.timestamp)
                 self.watchers[fullname] = wfile
                 dprintf(
                     3, "watching file %s, timestamp %s, quiet %d\n",
                     fullname,
                     time.strftime("%x %X",
                                   time.localtime(wfile.mtime)), quiet)
         # find subdirectories matching the subdir_patterns, and watch them for changes
         for fname in wdir.fileset:
             fullname = Purr.canonizePath(os.path.join(dirname, fname))
             if os.path.isdir(fullname):
                 for desc, dir_patts, canary_patts in self._subdir_patterns:
                     if matches_patterns(fname, dir_patts):
                         quiet = matches_patterns(fname,
                                                  self._quiet_patterns)
                         wdir = Purrer.WatchedSubdir(
                             fullname,
                             canary_patterns=canary_patts,
                             quiet=quiet,
                             mtime=self.timestamp)
                         self.watchers[fullname] = wdir
                         dprintf(
                             3,
                             "watching subdirectory %s/{%s}, timestamp %s, quiet %d\n",
                             fullname, ",".join(canary_patts),
                             time.strftime("%x %X",
                                           time.localtime(wdir.mtime)),
                             quiet)
                         break
     # set state and save config
     self.setWatchingState(dirname, watching, save_config=save_config)
Example #7
0
 def save(self,
          dirname=None,
          refresh=0,
          refresh_index=True,
          emit_message=True):
     """Saves entry in the given directory. Data products will be copied over if not
     residing in that directory.
     'refresh' is a timestamp, passed to renderIndex(), causing all data products OLDER than the specified time to be regenerated.
     'refresh_index', if true, causes index files to be re-rendered unconditionally
     """
     if not refresh and not self.updated:
         return
     timestr = time.strftime("%Y%m%d-%H%M%S",
                             time.localtime(self.timestamp))
     Purr.progressMessage("Rendering entry for %s" % timestr)
     if dirname:
         self.pathname = pathname = os.path.join(
             dirname,
             "%s-%s" % (("ignore" if self.ignore else "entry"), timestr))
     elif not self.pathname:
         raise ValueError("Cannot save entry: pathname not specified")
     else:
         pathname = self.pathname
     # set timestamp
     if not self.timestamp:
         self.timestamp = int(time.time())
     # get canonized path to output directory
     pathname = Purr.canonizePath(pathname)
     if not os.path.exists(pathname):
         os.mkdir(pathname)
     # now save content
     # get device of pathname -- need to know whether we move or copy
     devnum = os.stat(pathname).st_dev
     # copy data products as needed
     dprintf(2, "saving entry %s, %d data products\n", pathname,
             len(self.dps))
     dps = []
     for dp in self.dps:
         # if archived, this indicates a previously saved data product, so ignore it
         # if ignored, no need to save the DP -- but keep it in list
         if dp.archived or dp.ignored:
             dprintf(3, "dp %s is archived or ignored, skipping\n",
                     dp.sourcepath)
             dps.append(dp)
             continue
         # file missing for some reason (perhaps it got removed on us?), so skip the data product entirely
         if not os.path.exists(dp.sourcepath):
             dprintf(2, "data product %s missing, ignoring\n",
                     dp.sourcepath)
             continue
         Purr.progressMessage("archiving %s" % dp.filename, sub=True)
         # get normalized source and destination paths
         dprintf(2, "data product: %s, rename %s, policy %s\n",
                 dp.sourcepath, dp.filename, dp.policy)
         sourcepath = Purr.canonizePath(dp.sourcepath)
         destname = dp.fullpath = os.path.join(pathname, dp.filename)
         dprintf(2, "data product: %s -> %s\n", sourcepath, destname)
         # does the destination product already exist? skip if same file, else remove
         if os.path.exists(destname):
             if os.path.samefile(destname, sourcepath):
                 dprintf(2, "same file, skipping\n")
                 dp.timestamp = os.path.getmtime(destname)
                 dps.append(dp)
                 continue
             if os.system("/bin/rm -fr '%s'" % destname):
                 print("Error removing %s, which is in the way of %s" %
                       (destname, sourcepath))
                 print("This data product is not saved.")
                 continue
         # for directories, compress with tar
         if os.path.isdir(sourcepath):
             sourcepath = sourcepath.rstrip('/')
             if dp.policy == "copy" or dp.policy.startswith("move"):
                 dprintf(2, "archiving to tgz\n")
                 if os.system("tar zcf '%s' -C '%s' '%s'" %
                              (destname, os.path.dirname(sourcepath),
                               os.path.basename(sourcepath))):
                     print("Error archiving %s to %s" %
                           (sourcepath, destname))
                     print("This data product is not saved.")
                     continue
                 if dp.policy.startswith("move"):
                     os.system("/bin/rm -fr '%s'" % sourcepath)
         # else just a file
         else:
             # now copy/move it over
             if dp.policy == "copy":
                 dprintf(2, "copying\n")
                 if _copy_update(sourcepath, destname):
                     print("Error copying %s to %s" %
                           (sourcepath, destname))
                     print("This data product is not saved.")
                     continue
             elif dp.policy.startswith('move'):
                 if _move_update(sourcepath, destname):
                     print("Error moving %s to %s" % (sourcepath, destname))
                     print("This data product is not saved.")
                     continue
         # success, set timestamp and append
         dp.timestamp = os.path.getmtime(destname)
         dp.archived = True
         dps.append(dp)
     # reset list of data products
     self.dps = dps
     # now write out content
     self.cached_include = os.path.join(pathname, 'index.include.html')
     self.cached_include_valid = False
     self.index_file = os.path.join(pathname, "index.html")
     self.generateIndex(refresh=refresh,
                        refresh_index=refresh_index and time.time())
     self.updated = False
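A minimal usage sketch for the method above, assuming "entry" is an existing instance of the class that defines save() and the log directory path is a placeholder:
 # Assumed: "entry" is an existing entry object; the directory is a placeholder.
 entry.save(dirname="/home/user/purrlog")
 # Passing a timestamp as 'refresh' forces data products older than that time
 # to be regenerated when the entry is saved again.
 entry.save(refresh=time.time(), refresh_index=True)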
Example #8
0
 def save(self, dirname=None, refresh=0, refresh_index=True, emit_message=True):
     """Saves entry in the given directory. Data products will be copied over if not
     residing in that directory.
     'refresh' is a timestamp, passed to renderIndex(), causing all data products OLDER than the specified time to be regenerated.
     'refresh_index', if true, causes index files to be re-rendered unconditionally
     """
     if not refresh and not self.updated:
         return
     timestr = time.strftime("%Y%m%d-%H%M%S", time.localtime(self.timestamp))
     Purr.progressMessage("Rendering entry for %s" % timestr)
     if dirname:
         self.pathname = pathname = os.path.join(dirname, "%s-%s" %
                                                 (("ignore" if self.ignore else "entry"), timestr))
     elif not self.pathname:
         raise ValueError("Cannot save entry: pathname not specified")
     else:
         pathname = self.pathname
     # set timestamp
     if not self.timestamp:
         self.timestamp = int(time.time())
     # get canonized path to output directory
     pathname = Purr.canonizePath(pathname)
     if not os.path.exists(pathname):
         os.mkdir(pathname)
     # now save content
     # get device of pathname -- need to know whether we move or copy
     devnum = os.stat(pathname).st_dev
     # copy data products as needed
     dprintf(2, "saving entry %s, %d data products\n", pathname, len(self.dps))
     dps = []
     for dp in self.dps:
         # if archived, this indicates a previously saved data product, so ignore it
         # if ignored, no need to save the DP -- but keep it in list
         if dp.archived or dp.ignored:
             dprintf(3, "dp %s is archived or ignored, skipping\n", dp.sourcepath)
             dps.append(dp)
             continue
         # file missing for some reason (perhaps it got removed on us?), so skip the data product entirely
         if not os.path.exists(dp.sourcepath):
             dprintf(2, "data product %s missing, ignoring\n", dp.sourcepath)
             continue
         Purr.progressMessage("archiving %s" % dp.filename, sub=True)
         # get normalized source and destination paths
         dprintf(2, "data product: %s, rename %s, policy %s\n", dp.sourcepath, dp.filename, dp.policy)
         sourcepath = Purr.canonizePath(dp.sourcepath)
         destname = dp.fullpath = os.path.join(pathname, dp.filename)
         dprintf(2, "data product: %s -> %s\n", sourcepath, destname)
         # does the destination product already exist? skip if same file, else remove
         if os.path.exists(destname):
             if os.path.samefile(destname, sourcepath):
                 dprintf(2, "same file, skipping\n")
                 dp.timestamp = os.path.getmtime(destname)
                 dps.append(dp)
                 continue
             if os.system("/bin/rm -fr '%s'" % destname):
                 print("Error removing %s, which is in the way of %s" % (destname, sourcepath))
                 print("This data product is not saved.")
                 continue
         # for directories, compress with tar
         if os.path.isdir(sourcepath):
             sourcepath = sourcepath.rstrip('/')
             if dp.policy == "copy" or dp.policy.startswith("move"):
                 dprintf(2, "archiving to tgz\n")
                 if os.system("tar zcf '%s' -C '%s' '%s'" % (destname,
                                                             os.path.dirname(sourcepath),
                                                             os.path.basename(sourcepath))):
                     print("Error archiving %s to %s" % (sourcepath, destname))
                     print("This data product is not saved.")
                     continue
                 if dp.policy.startswith("move"):
                     os.system("/bin/rm -fr '%s'" % sourcepath)
         # else just a file
         else:
             # now copy/move it over
             if dp.policy == "copy":
                 dprintf(2, "copying\n")
                 if _copy_update(sourcepath, destname):
                     print("Error copying %s to %s" % (sourcepath, destname))
                     print("This data product is not saved.")
                     continue
             elif dp.policy.startswith('move'):
                 if _move_update(sourcepath, destname):
                     print("Error moving %s to %s" % (sourcepath, destname))
                     print("This data product is not saved.")
                     continue
         # success, set timestamp and append
         dp.timestamp = os.path.getmtime(destname)
         dp.archived = True
         dps.append(dp)
     # reset list of data products
     self.dps = dps
     # now write out content
     self.cached_include = os.path.join(pathname, 'index.include.html')
     self.cached_include_valid = False
     self.index_file = os.path.join(pathname, "index.html")
     self.generateIndex(refresh=refresh, refresh_index=refresh_index and time.time())
     self.updated = False