def run(self):
    """Render the job, enforcing the processing timeout.

    Starts the worker thread and waits at most self.__timeout seconds
    for it to finish.  If the timeout is reached, the job's files are
    removed and the worker thread is forcibly killed.

    Returns:
        One of the RESULT_ constants: the worker thread's own result
        when it finished in time, RESULT_TIMEOUT_REACHED otherwise.
    """
    self.__thread.start()
    self.__thread.join(self.__timeout)

    # If the thread is no longer alive, the timeout was not reached and
    # all is well.  (is_alive() replaces the camelCase isAlive() alias,
    # which was removed in Python 3.9; the snake_case form has existed
    # since Python 2.6.)
    if not self.__thread.is_alive():
        return self.__thread.result

    LOG.info("Rendering of job #%d took too long (timeout reached)!" %
             self.__thread.job.id)

    # Remove the job files
    self.__thread.job.remove_all_files()

    # Kill the thread and return TIMEOUT_REACHED
    self.__thread.kill()
    del self.__thread
    LOG.debug("Thread removed.")
    return RESULT_TIMEOUT_REACHED
def kill(self):
    """Asynchronously raise SystemExit in this worker thread.

    Uses the CPython C API PyThreadState_SetAsyncExc to schedule a
    SystemExit exception in the thread identified by __get_my_tid().

    Raises:
        ValueError: if the thread ID does not designate a running
            thread (the call affected zero threads).
        SystemError: if the call affected more than one thread; the
            pending async exception is cancelled before raising.
    """
    LOG.debug("Killing job #%d's worker thread..." % self.job.id)
    # Wrap the tid in c_long: without explicit argtypes, ctypes would
    # pass a plain Python int as a C int, truncating thread IDs on
    # LP64 platforms.
    tid = ctypes.c_long(self.__get_my_tid())
    res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
        tid, ctypes.py_object(SystemExit))
    if res == 0:
        raise ValueError("Invalid thread ID")
    elif res != 1:
        # More than one thread was affected: revoke the pending async
        # exception (NULL exception pointer) before bailing out.
        ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, 0)
        raise SystemError("PyThreadState_SetAsyncExc failed")
def kill(self):
    """Asynchronously raise SystemExit in this worker thread.

    Uses the CPython C API PyThreadState_SetAsyncExc to schedule a
    SystemExit exception in the thread identified by __get_my_tid().

    Raises:
        ValueError: if the thread ID does not designate a running
            thread (the call affected zero threads).
        SystemError: if the call affected more than one thread; the
            pending async exception is cancelled before raising.
    """
    LOG.debug("Killing job #%d's worker thread..." % self.job.id)
    # Wrap the tid in c_long: without explicit argtypes, ctypes would
    # pass a plain Python int as a C int, truncating thread IDs on
    # LP64 platforms.
    tid = ctypes.c_long(self.__get_my_tid())
    res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
        tid, ctypes.py_object(SystemExit))
    if res == 0:
        raise ValueError("Invalid thread ID")
    elif res != 1:
        # More than one thread was affected: revoke the pending async
        # exception (NULL exception pointer) before bailing out.
        ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, 0)
        raise SystemError("PyThreadState_SetAsyncExc failed")
def cleanup(self):
    """Run one iteration of the cleanup loop.

    Builds the list of rendering result files sorted newest-first
    (oldest last), then pops entries off the end and deletes them
    until total disk usage drops back under 80% of
    RENDERING_RESULT_MAX_SIZE_GB.
    """
    # Gather info on every rendering result, ignoring dotfiles and
    # thumbnail images.
    paths = [
        os.path.join(RENDERING_RESULT_PATH, name)
        for name in os.listdir(RENDERING_RESULT_PATH)
        if not (name.startswith(".") or
                name.endswith(render.THUMBNAIL_SUFFIX))
    ]
    files = [self.get_file_info(path) for path in paths]

    # Total size of the renderings and the 80% threshold, in bytes.
    size = sum(entry["size"] for entry in files)
    threshold = 0.8 * RENDERING_RESULT_MAX_SIZE_GB * 1024 * 1024 * 1024

    # Nothing to do while we are under the threshold.
    if size < threshold:
        return

    LOG.info("%s consumed for a %s threshold. Cleaning..." %
             (self.get_formatted_value(size),
              self.get_formatted_value(threshold)))

    # Newest first, so pop() always hands us the oldest file.
    files.sort(key=lambda entry: entry["time"], reverse=True)

    while size > threshold:
        if not files:
            LOG.error("No files to remove and still above threshold! "
                      "Something's wrong!")
            return

        entry = files.pop()
        LOG.debug("Considering file %s..." % entry["name"])

        job = MapRenderingJob.objects.get_by_filename(entry["name"])
        if job:
            # Found the parent job: drop all of its files at once and
            # account for the space we got back.
            LOG.debug("Found matching parent job #%d." % job.id)
            removed, saved = job.remove_all_files()
            size -= saved
            if removed:
                LOG.info("Removed %d files for job #%d (%s)." %
                         (removed, job.id,
                          self.get_formatted_details(saved, size,
                                                     threshold)))
        else:
            # Orphaned file with no parent job: safe to delete it
            # directly to reclaim disk space.
            LOG.debug("No parent job found.")
            os.remove(entry["path"])
            size -= entry["size"]
            LOG.info("Removed orphan file %s (%s)." %
                     (entry["name"],
                      self.get_formatted_details(entry["size"], size,
                                                 threshold)))
def cleanup(self):
    """Run one iteration of the cleanup loop.

    A sorted list of files from the renderings directory is first
    created, oldest files last. Files are then pop()-ed out of the
    list and removed until we're back below the size threshold.
    """
    # Collect file info for every rendering result, skipping dotfiles
    # and thumbnail images.
    files = map(lambda f: self.get_file_info(f), [
        os.path.join(RENDERING_RESULT_PATH, f)
        for f in os.listdir(RENDERING_RESULT_PATH)
        if not (f.startswith('.') or f.endswith(render.THUMBNAIL_SUFFIX))
    ])

    # Compute the total size occupied by the renderings, and the actual 80%
    # threshold, in bytes.
    size = reduce(lambda x, y: x + y['size'], files, 0)
    threshold = 0.8 * RENDERING_RESULT_MAX_SIZE_GB * 1024 * 1024 * 1024

    # Stop here if we are below the threshold
    if size < threshold:
        return

    LOG.info("%s consumed for a %s threshold. Cleaning..." %
             (self.get_formatted_value(size),
              self.get_formatted_value(threshold)))

    # Sort files by timestamp, oldest last, and start removing them by
    # pop()-ing the list.  (Python 2 cmp-style comparator: orders
    # newest first, so pop() returns the oldest entry.)
    files.sort(lambda x, y: cmp(y['time'], x['time']))

    while size > threshold:
        if not len(files):
            # Ran out of files while still above the threshold: report
            # it instead of looping forever.
            LOG.error("No files to remove and still above threshold! "
                      "Something's wrong!")
            return

        f = files.pop()
        LOG.debug("Considering file %s..." % f['name'])

        job = MapRenderingJob.objects.get_by_filename(f['name'])
        if job:
            # Found the parent job: remove every file belonging to it
            # and account for the space we got back.
            LOG.debug("Found matching parent job #%d." % job.id)
            removed, saved = job.remove_all_files()
            size -= saved
            if removed:
                LOG.info("Removed %d files for job #%d (%s)." %
                         (removed, job.id,
                          self.get_formatted_details(saved, size,
                                                     threshold)))
        else:
            # If we didn't find a parent job, it means this is an orphaned
            # file, we can safely remove it to get back some disk space.
            LOG.debug("No parent job found.")
            os.remove(f['path'])
            size -= f['size']
            LOG.info("Removed orphan file %s (%s)." %
                     (f['name'],
                      self.get_formatted_details(f['size'], size,
                                                 threshold)))