def lock(self):
    """
    Create the cache lock unless it exists, set the cache mode.

    If a lock file owned by a still-running process is found the
    cache is opened read-only. Otherwise (no lock file, malformed
    pid, or stale lock from a dead process) a fresh lock holding
    our own pid is written and the cache is opened read-write.
    """
    try:
        # Attempt to extract the PID from the lock file; the context
        # manager closes the file even when readline() raises
        with open(self._lock) as lock_file:
            pid = lock_file.readline().strip()
        # Make sure the PID is sane (otherwise ignore it)
        try:
            pid = int(pid)
        except ValueError:
            log.warn("Malformed cache lock ({0}), ignoring".format(pid))
            raise IOError
        # Check that the owning process is still running
        # (NOTE: /proc lookup assumes a Linux-style procfs)
        if not os.path.exists("/proc/{0}".format(pid)):
            log.cache("Breaking stale lock (process {0} dead)".format(pid))
            raise IOError
        log.info("Found lock {0}, opening read-only".format(self._lock))
        self._mode = "read-only"
    except IOError:
        # No usable lock found ---> take ownership of the cache
        log.cache("Creating cache lock {0}".format(self._lock))
        with open(self._lock, "w") as lock_file:
            lock_file.write("{0}\n".format(os.getpid()))
        self._mode = "read-write"
def _add(self, testcases):
    """ Add given test cases to the test run """
    # Short summary of what is about to happen
    identifiers = [testcase.identifier for testcase in testcases]
    log.info("Adding {0} to {1}".format(
            listed(identifiers, "testcase", max=3),
            self._object.identifier))
    # Gather the ids and attempt a single bulk insert first
    data = [testcase.id for testcase in testcases]
    log.data(pretty(data))
    try:
        self._server.TestRun.add_cases(self.id, data)
    except xmlrpclib.Fault as error:
        # Anything else than a duplicate entry is fatal
        if "Duplicate entry" not in unicode(error):
            raise
        # Duplicates ---> fall back to inserting one case at a time
        log.warn(error)
        for case_id in data:
            try:
                self._server.TestRun.add_cases(self.id, case_id)
            except xmlrpclib.Fault:
                pass
    # RunCaseRuns will need update ---> erase current data
    self._object.caseruns._init()
def load(self):
    """
    Load caches from specified file.

    Reads the gzip-compressed pickle written by save(), restores the
    per-class caches (immutable & mutable classes first, containers
    last to prevent object duplicates) and expires stale entries.
    Missing or empty cache files are handled gracefully; any other
    load failure degrades to the CACHE_OBJECTS level.
    """
    # Nothing to do when persistent caching is off
    if not self._filename or get_cache_level() < config.CACHE_PERSISTENT:
        return
    # Load the saved cache from file
    try:
        log.debug("Loading persistent cache from {0}".format(
            self._filename))
        # Context manager closes the file even when unpickling fails
        with gzip.open(self._filename, 'rb') as input_file:
            data = pickle.load(input_file)
    except EOFError:
        log.cache("Cache file empty, will fill it upon exit")
        return
    except (IOError, zlib.error) as error:
        # errno 2 ---> the cache file simply does not exist yet
        if getattr(error, "errno", None) == 2:
            log.warn("Cache file not found, will create one on exit")
            return
        else:
            log.error("Failed to load the cache ({0})".format(error))
            log.warn("Going on but switching to the CACHE_OBJECTS level")
            set_cache_level(config.CACHE_OBJECTS)
            self.unlock()
            return
    # Restore cache for immutable & mutable classes first
    for current_class in self._immutable + self._mutable:
        try:
            log.cache("Loading cache for {0}".format(
                current_class.__name__))
            current_class._cache = data[current_class.__name__]
        except KeyError:
            log.cache("Failed to load cache for {0}, starting "
                    "with empty".format(current_class.__name__))
            current_class._cache = {}
    # Containers to be loaded last (to prevent object duplicates)
    for current_class in self._containers:
        try:
            log.cache("Loading cache for {0}".format(
                current_class.__name__))
            current_class._cache = data[current_class.__name__]
        except KeyError:
            log.cache("Failed to load cache for {0}, starting "
                    "with empty".format(current_class.__name__))
            current_class._cache = {}
        # Wake up container objects from the id-sleep
        for container in current_class._cache.values():
            container._wake()
    # Clear expired items and give a short summary for debugging
    self.expire()
    log.cache("Cache restore stats:\n" + self.stats().strip())
def load(self):
    """
    Load caches from specified file.

    Reads the gzip-compressed pickle written by save(), restores the
    per-class caches (immutable & mutable classes first, containers
    last to prevent object duplicates) and expires stale entries.
    Missing or empty cache files are handled gracefully; any other
    load failure degrades to the CACHE_OBJECTS level.
    """
    # Nothing to do when persistent caching is off
    if not self._filename or get_cache_level() < config.CACHE_PERSISTENT:
        return
    # Load the saved cache from file
    try:
        log.debug("Loading persistent cache from {0}".format(
            self._filename))
        # Context manager closes the file even when unpickling fails
        with gzip.open(self._filename, 'rb') as input_file:
            data = pickle.load(input_file)
    except EOFError:
        log.cache("Cache file empty, will fill it upon exit")
        return
    except (IOError, zlib.error) as error:
        # errno 2 ---> the cache file simply does not exist yet
        if getattr(error, "errno", None) == 2:
            log.warn("Cache file not found, will create one on exit")
            return
        else:
            log.error("Failed to load the cache ({0})".format(error))
            log.warn("Going on but switching to the CACHE_OBJECTS level")
            set_cache_level(config.CACHE_OBJECTS)
            self.unlock()
            return
    # Restore cache for immutable & mutable classes first
    for current_class in self._immutable + self._mutable:
        try:
            log.cache("Loading cache for {0}".format(
                current_class.__name__))
            current_class._cache = data[current_class.__name__]
        except KeyError:
            log.cache("Failed to load cache for {0}, starting "
                    "with empty".format(current_class.__name__))
            current_class._cache = {}
    # Containers to be loaded last (to prevent object duplicates)
    for current_class in self._containers:
        try:
            log.cache("Loading cache for {0}".format(
                current_class.__name__))
            current_class._cache = data[current_class.__name__]
        except KeyError:
            log.cache("Failed to load cache for {0}, starting "
                    "with empty".format(current_class.__name__))
            current_class._cache = {}
        # Wake up container objects from the id-sleep
        for container in current_class._cache.itervalues():
            container._wake()
    # Clear expired items and give a short summary for debugging
    self.expire()
    log.cache("Cache restore stats:\n" + self.stats().strip())
def setup(self, filename=None):
    """ Set cache filename and initialize expiration times """
    # Persistent caching off ---> nothing to configure
    if get_cache_level() < config.CACHE_PERSISTENT:
        return
    # Cache file location: explicit argument wins over the config
    if filename is None:
        try:
            self._filename = config.Config().cache.file
        except AttributeError:
            log.warn("Persistent caching off "
                    "(cache filename not found in the config)")
    else:
        self._filename = filename
    self._lock = "{0}.lock".format(self._filename)
    # Pick up user-defined expiration times from the config
    affected_classes = self._classes + [
            Nitrate, mutable.Mutable, containers.Container]
    for cached_class in affected_classes:
        try:
            expiration = getattr(
                    config.Config().expiration,
                    cached_class.__name__.lower())
        except AttributeError:
            continue
        # Plain integers mean seconds; recognize the special values
        if isinstance(expiration, int):
            expiration = datetime.timedelta(seconds=expiration)
        elif expiration == "NEVER_EXPIRE":
            expiration = config.NEVER_EXPIRE
        elif expiration == "NEVER_CACHE":
            expiration = config.NEVER_CACHE
        # Anything else than a timedelta at this point is invalid
        if isinstance(expiration, datetime.timedelta):
            cached_class._expiration = expiration
            log.debug("User defined expiration for {0}: {1}".format(
                    cached_class.__name__, expiration))
        else:
            log.warn("Invalid expiration time '{0}'".format(expiration))
def setup(self, filename=None):
    """ Set cache filename and initialize expiration times """
    # Bail out early when persistent caching is disabled
    if get_cache_level() < config.CACHE_PERSISTENT:
        return
    # Determine the cache file (argument takes precedence over config)
    if filename is not None:
        self._filename = filename
    else:
        try:
            self._filename = config.Config().cache.file
        except AttributeError:
            log.warn("Persistent caching off "
                    "(cache filename not found in the config)")
    self._lock = "{0}.lock".format(self._filename)
    # Apply user-defined expiration times from the config
    for klass in self._classes + [
            Nitrate, mutable.Mutable, containers.Container]:
        try:
            expiration = getattr(
                    config.Config().expiration, klass.__name__.lower())
        except AttributeError:
            # No expiration configured for this class ---> skip it
            continue
        # Seconds given as an int; translate the two special strings
        if isinstance(expiration, int):
            expiration = datetime.timedelta(seconds=expiration)
        elif expiration == "NEVER_EXPIRE":
            expiration = config.NEVER_EXPIRE
        elif expiration == "NEVER_CACHE":
            expiration = config.NEVER_CACHE
        # Warn about values which could not be converted
        if not isinstance(expiration, datetime.timedelta):
            log.warn("Invalid expiration time '{0}'".format(expiration))
        else:
            klass._expiration = expiration
            log.debug("User defined expiration for {0}: {1}".format(
                    klass.__name__, expiration))
def _add(self, testcases):
    """
    Add given test cases to the test run.

    Attempts a single bulk insert first; upon a "Duplicate entry"
    fault falls back to adding the cases one by one, silently
    skipping the duplicates. Any other fault is re-raised.
    """
    # Short info about the action
    identifiers = [testcase.identifier for testcase in testcases]
    log.info("Adding {0} to {1}".format(
            listed(identifiers, "testcase", max=3),
            self._object.identifier))
    # Prepare data and push
    data = [testcase.id for testcase in testcases]
    log.data(pretty(data))
    try:
        self._server.TestRun.add_cases(self.id, data)
    # Handle duplicate entry errors by adding test cases one by one
    except xmlrpclib.Fault as error:
        # six.u() is for text literals only; six.text_type() is the
        # correct way to convert the fault object to text
        if "Duplicate entry" not in six.text_type(error):
            raise
        log.warn(error)
        for case_id in data:  # renamed from 'id' (shadowed builtin)
            try:
                self._server.TestRun.add_cases(self.id, case_id)
            except xmlrpclib.Fault:
                pass
    # RunCaseRuns will need update ---> erase current data
    self._object.caseruns._init()