def download(self, basefile=None, reporter=None):
    """Download documents from one or all subrepos.

    :param basefile: either None (download everything from every
        subrepo), or a string on the form "subrepoalias:basefile"
        (download one document from one subrepo) or "subrepoalias:"
        (download everything from that subrepo).
    :param reporter: unused; each subrepo gets its own reporter
        callback that collects downloaded basefiles so their entry
        files can be linked into this repo's store.
    """
    if basefile:
        # expect a basefile on the form "subrepoalias:basefile" or
        # just "subrepoalias:". BUGFIX: split on the first colon
        # only, so that a basefile which itself contains colons no
        # longer raises ValueError.
        subrepoalias, basefile = basefile.split(":", 1)
    else:
        subrepoalias = None
    if not basefile:
        basefile = None  # ie convert '' => None
    found = False
    for cls in self.subrepos:
        if subrepoalias is None or cls.alias == subrepoalias:
            found = True
            inst = self.get_instance(cls)
            basefiles = []
            try:
                ret = inst.download(basefile, reporter=basefiles.append)
            except Exception as e:
                loc = util.location_exception(e)
                self.log.error("download for %s failed: %s (%s)" %
                               (cls.alias, e, loc))
                ret = False
            finally:
                # even after a failure, link entry files for whatever
                # the subrepo managed to download before the error
                for b in basefiles:
                    util.link_or_copy(inst.store.documententry_path(b),
                                      self.store.documententry_path(b))
                # msbfs/entries/.root.json -> myndfs/entries/msbfs.json
                util.link_or_copy(inst.store.documententry_path(".root"),
                                  self.store.documententry_path(inst.alias))
    if not found:
        self.log.error("Couldn't find any subrepo with alias %s" %
                       subrepoalias)
def download(self, basefile=None):
    """Try each subrepo in turn until basefile (or everything) is downloaded.

    :param basefile: a specific document to download, or None to
        download everything from every subrepo. When a specific
        basefile is requested, the loop stops at the first subrepo
        that successfully downloads it.
    """
    for c in self.subrepos:
        inst = self.get_instance(c)
        # make sure that our store has access to our now
        # initialized subrepo objects
        if c not in self.store.docrepo_instances:
            self.store.docrepo_instances[c] = inst
        # temporarily re-set the logging level so that the
        # subrepos INFO messages get reported (see note in
        # get_instance).
        loglevel_workaround = False
        if (self.log.getEffectiveLevel() == logging.INFO and
                inst.log.getEffectiveLevel() == logging.INFO + 1):
            loglevel_workaround = True
            inst.log.setLevel(self.log.getEffectiveLevel())
        try:
            ret = inst.download(basefile)
        except Exception as e:
            # be resilient
            loc = util.location_exception(e)
            self.log.error("download for %s failed: %s (%s)" %
                           (c.alias, e, loc))
            ret = False
        finally:
            # BUGFIX: restore the subrepo's log level even when
            # download() raises (previously the lowered level
            # leaked on the exception path)
            if loglevel_workaround:
                inst.log.setLevel(self.log.getEffectiveLevel() + 1)
        if basefile and ret:
            # we got the doc we want, we're done!
            return
def download(self, basefile=None):
    """Attempt a download through each subrepo in turn.

    Failures in individual subrepos are logged but never abort the
    loop. If a specific basefile was requested and some subrepo
    downloads it successfully, stop immediately.
    """
    for repoclass in self.subrepos:
        repo = self.get_instance(repoclass)
        # ensure the store can reach the freshly initialized subrepo
        # object as well
        if repoclass not in self.store.docrepo_instances:
            self.store.docrepo_instances[repoclass] = repo
        try:
            result = repo.download(basefile)
        except Exception as e:
            # stay resilient: log the failure and move on to the
            # next subrepo
            self.log.error("download for %s failed: %s (%s)" % (
                repoclass.alias, e, util.location_exception(e)))
            result = False
        if basefile and result:
            # the requested document was fetched -- nothing more to do
            return
def download(self, basefile=None, reporter=None):
    """Download documents via the subrepo(s) matching the given alias.

    :param basefile: either None (download everything from every
        subrepo), or a string "subrepoalias:basefile" /
        "subrepoalias:" selecting a subrepo (and optionally a single
        document within it).
    :param reporter: unused; each subrepo receives its own reporter
        callback collecting downloaded basefiles.
    """
    if basefile:
        # expect a basefile on the form "subrepoalias:basefile" or
        # just "subrepoalias:". BUGFIX: split on the first colon
        # only so basefiles containing colons are preserved instead
        # of raising ValueError.
        subrepoalias, basefile = basefile.split(":", 1)
    else:
        subrepoalias = None
    if not basefile:
        basefile = None  # ie convert '' => None
    found = False
    for cls in self.subrepos:
        if subrepoalias is None or cls.alias == subrepoalias:
            found = True
            inst = self.get_instance(cls)
            # the feature where subrepos has a slightly higher
            # loglevel to avoid creating almost-duplicate "OK" log
            # messages is not useful for downloading. So we work
            # around it here.
            subrepo_loglevel = inst.log.getEffectiveLevel()
            if subrepo_loglevel == self.log.getEffectiveLevel() + 1:
                inst.log.setLevel(self.log.getEffectiveLevel())
            basefiles = []
            try:
                ret = inst.download(basefile, reporter=basefiles.append)
            except Exception as e:
                loc = util.location_exception(e)
                self.log.error("download for %s failed: %s (%s)" %
                               (cls.alias, e, loc))
                ret = False
            finally:
                # always restore the subrepo log level, then link the
                # entry files for everything that was downloaded
                inst.log.setLevel(subrepo_loglevel)
                for b in basefiles:
                    util.link_or_copy(inst.store.documententry_path(b),
                                      self.store.documententry_path(b))
                # msbfs/entries/.root.json -> myndfs/entries/msbfs.json
                util.link_or_copy(
                    inst.store.documententry_path(".root"),
                    self.store.documententry_path(inst.alias))
    if not found:
        self.log.error("Couldn't find any subrepo with alias %s" %
                       subrepoalias)
def download(self, basefile=None, reporter=None):
    """Dispatch downloading to the subrepo(s) selected by alias.

    :param basefile: None to download everything from every subrepo,
        or "subrepoalias:basefile" / "subrepoalias:" to select a
        subrepo (and optionally one document).
    :param reporter: unused; a per-subrepo reporter callback records
        the downloaded basefiles instead.
    """
    if basefile:
        # expect a basefile on the form "subrepoalias:basefile" or
        # just "subrepoalias:". BUGFIX: use maxsplit=1 so a basefile
        # that itself contains colons does not raise ValueError.
        subrepoalias, basefile = basefile.split(":", 1)
    else:
        subrepoalias = None
    if not basefile:
        basefile = None  # ie convert '' => None
    found = False
    for cls in self.subrepos:
        if subrepoalias is None or cls.alias == subrepoalias:
            found = True
            inst = self.get_instance(cls)
            # the feature where subrepos has a slightly higher
            # loglevel to avoid creating almost-duplicate "OK" log
            # messages is not useful for downloading. So we work
            # around it here.
            subrepo_loglevel = inst.log.getEffectiveLevel()
            if subrepo_loglevel == self.log.getEffectiveLevel() + 1:
                inst.log.setLevel(self.log.getEffectiveLevel())
            basefiles = []
            try:
                ret = inst.download(basefile, reporter=basefiles.append)
            except Exception as e:
                loc = util.location_exception(e)
                self.log.error("download for %s failed: %s (%s)" %
                               (cls.alias, e, loc))
                ret = False
            finally:
                # restore the log level unconditionally, then link
                # entry files for whatever was downloaded
                inst.log.setLevel(subrepo_loglevel)
                for b in basefiles:
                    util.link_or_copy(inst.store.documententry_path(b),
                                      self.store.documententry_path(b))
                # msbfs/entries/.root.json -> myndfs/entries/msbfs.json
                util.link_or_copy(inst.store.documententry_path(".root"),
                                  self.store.documententry_path(inst.alias))
    if not found:
        self.log.error("Couldn't find any subrepo with alias %s" %
                       subrepoalias)
def updateentry(f, section, entrypath, *args, **kwargs):
    """runs the provided function with the provided arguments,
    captures any logged events emitted, catches any errors, and
    records the result in the entry file under the provided
    section. The basefile is assumed to be the first element in
    args.
    """
    def clear(key, d):
        # remove key from d if present (avoids KeyError)
        if key in d:
            del d[key]
    logstream = StringIO()
    handler = logging.StreamHandler(logstream)
    # FIXME: Think about which format is optimal for storing in
    # docentry. Do we need eg name and levelname? Should we log
    # date as well as time?
    fmt = "%(asctime)s %(name)s %(levelname)s %(message)s"
    formatter = logging.Formatter(fmt, datefmt="%H:%M:%S")
    handler.setFormatter(formatter)
    handler.setLevel(logging.WARNING)
    rootlog = logging.getLogger()
    rootlog.addHandler(handler)
    start = datetime.datetime.now()
    # BUGFIX: pre-initialize success so the finally clause below
    # never raises UnboundLocalError when a BaseException other
    # than KeyboardInterrupt (eg SystemExit) propagates past all
    # the except clauses.
    success = None
    try:
        ret = f(*args, **kwargs)
        success = True
    except DocumentRemovedError:
        success = "removed"
        raise
    except Exception:
        success = False
        errortype, errorval, errortb = sys.exc_info()
        raise
    except KeyboardInterrupt:
        # None signals: don't touch the entry file at all
        success = None
        raise
    else:
        return ret
    finally:
        rootlog.removeHandler(handler)
        if success is not None:
            warnings = logstream.getvalue()
            entry = DocumentEntry(entrypath)
            if section not in entry.status:
                entry.status[section] = {}
            entry.status[section]['success'] = success
            entry.status[section]['date'] = start
            delta = datetime.datetime.now() - start
            try:
                duration = delta.total_seconds()
            except AttributeError:
                # probably on py26, wich lack total_seconds()
                duration = delta.seconds + (delta.microseconds / 1000000.0)
            entry.status[section]['duration'] = duration
            if warnings:
                entry.status[section]['warnings'] = warnings
            else:
                clear('warnings', entry.status[section])
            if not success:
                entry.status[section]['traceback'] = "".join(
                    format_tb(errortb))
                entry.status[section]['error'] = "%s: %s (%s)" % (
                    errorval.__class__.__name__, errorval,
                    util.location_exception(errorval))
            else:
                clear('traceback', entry.status[section])
                clear('error', entry.status[section])
            entry.save()
def updateentry(f, section, entrypath, entrypath_arg, *args, **kwargs):
    """runs the provided function with the provided arguments,
    captures any logged events emitted, catches any errors, and
    records the result in the entry file under the provided
    section.

    Entrypath should be a function that takes a basefile string and
    returns the full path to the entry file for that basefile.
    """
    def clear(key, d):
        # remove key from d if present (avoids KeyError)
        if key in d:
            del d[key]
    logstream = StringIO()
    handler = logging.StreamHandler(logstream)
    # FIXME: Think about which format is optimal for storing in
    # docentry. Do we need eg name and levelname? Should we log
    # date as well as time?
    fmt = "%(asctime)s %(name)s %(levelname)s %(message)s"
    formatter = logging.Formatter(fmt, datefmt="%H:%M:%S")
    handler.setFormatter(formatter)
    handler.setLevel(logging.WARNING)
    rootlog = logging.getLogger()
    rootlog.addHandler(handler)
    start = datetime.datetime.now()
    # BUGFIX: pre-initialize success so the finally clause below
    # never raises UnboundLocalError when a BaseException other
    # than KeyboardInterrupt (eg SystemExit) propagates past all
    # the except clauses.
    success = None
    try:
        ret = f(*args, **kwargs)
        success = True
    except DocumentRemovedError:
        success = "removed"
        raise
    except DocumentRenamedError as e:
        # the document changed basefile during processing; record
        # the entry under its new name and return the wrapped result
        entrypath_arg = e.newbasefile
        success = True
        return e.returnvalue
    except Exception:
        success = False
        errortype, errorval, errortb = sys.exc_info()
        raise
    except KeyboardInterrupt:
        # None signals: don't touch the entry file at all
        success = None
        raise
    else:
        return ret
    finally:
        rootlog.removeHandler(handler)
        if success is not None:
            warnings = logstream.getvalue()
            entry = DocumentEntry(entrypath(entrypath_arg))
            if section not in entry.status:
                entry.status[section] = {}
            entry.status[section]['success'] = success
            entry.status[section]['date'] = start
            delta = datetime.datetime.now() - start
            try:
                duration = delta.total_seconds()
            except AttributeError:
                # probably on py26, wich lack total_seconds()
                duration = delta.seconds + (delta.microseconds / 1000000.0)
            entry.status[section]['duration'] = duration
            if warnings:
                entry.status[section]['warnings'] = warnings
            else:
                clear('warnings', entry.status[section])
            if not success:
                entry.status[section]['traceback'] = "".join(
                    format_tb(errortb))
                entry.status[section]['error'] = "%s: %s (%s)" % (
                    errorval.__class__.__name__, errorval,
                    util.location_exception(errorval))
            else:
                clear('traceback', entry.status[section])
                clear('error', entry.status[section])
            entry.save()