def run(self):
    """Renders the job using a JobRenderer, encapsulating all processing
    errors and exceptions, with the addition here of a processing timeout.

    Returns one of the RESULT_ constants.
    """
    # Start the wrapped renderer thread and wait at most self.__timeout
    # seconds for it to finish (join() returns in either case).
    self.__thread.start()
    self.__thread.join(self.__timeout)

    # If the thread is no longer alive, the timeout was not reached and all
    # is well.
    if not self.__thread.isAlive():
        return self.__thread.result

    LOG.info("Rendering of job #%d took too long (timeout reached)!" %
             self.__thread.job.id)

    # Remove the job files
    self.__thread.job.remove_all_files()

    # Kill the thread and return TIMEOUT_REACHED
    self.__thread.kill()
    del self.__thread
    LOG.debug("Thread removed.")
    return RESULT_TIMEOUT_REACHED
def kill(self):
    """Forcibly stop this worker thread by asynchronously raising
    SystemExit inside it, via CPython's PyThreadState_SetAsyncExc C API.

    The exception is only delivered the next time the thread executes
    Python bytecode, so a thread blocked in a long C call dies late.

    Raises:
        ValueError: the resolved thread ID is unknown to the interpreter.
        SystemError: the call affected an unexpected number of threads;
            the injection is reverted before raising.
    """
    LOG.debug("Killing job #%d's worker thread..." % self.job.id)
    res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
        self.__get_my_tid(), ctypes.py_object(SystemExit))
    if res == 0:
        raise ValueError("Invalid thread ID")
    elif res != 1:
        # More than one thread state was modified: undo the injection by
        # passing NULL (0) as the exception, then report the failure.
        ctypes.pythonapi.PyThreadState_SetAsyncExc(self.__get_my_tid(), 0)
        # Fixed message: the C API is named PyThreadState_SetAsyncExc,
        # not "PyThreadState_SetAsync".
        raise SystemError("PyThreadState_SetAsyncExc failed")
def run(self):
    """Run the main garbage collector thread loop, cleaning files every
    self.frequency seconds until the program is stopped."""
    LOG.info("Cleanup thread started.")
    # One cleanup pass immediately, then one every self.frequency seconds;
    # the loop only ends when the process exits.
    while True:
        self.cleanup()
        time.sleep(self.frequency)
def kill(self):
    """Forcibly terminate this worker thread by asynchronously raising
    SystemExit inside it (CPython PyThreadState_SetAsyncExc API).

    Note the exception is delivered only when the thread next runs
    Python bytecode; a thread stuck inside a C call is not interrupted
    immediately.

    Raises:
        ValueError: the thread ID is not known to the interpreter.
        SystemError: the call modified an unexpected number of thread
            states; the injection is reverted first.
    """
    LOG.debug("Killing job #%d's worker thread..." % self.job.id)
    res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
        self.__get_my_tid(), ctypes.py_object(SystemExit))
    if res == 0:
        raise ValueError("Invalid thread ID")
    elif res != 1:
        # Revert the injection (NULL exception) before reporting failure.
        ctypes.pythonapi.PyThreadState_SetAsyncExc(self.__get_my_tid(), 0)
        # Fixed message: the API is PyThreadState_SetAsyncExc, not
        # "PyThreadState_SetAsync".
        raise SystemError("PyThreadState_SetAsyncExc failed")
def serve(self):
    """Implement a basic service loop, looking every self.frequency seconds
    for a new job to render and dispatch it if one's available.

    This method can of course be overloaded by subclasses of
    MapOSMaticDaemon depending on their needs."""
    while True:
        try:
            # Take the first job reported as waiting to be rendered;
            # indexing an empty result raises IndexError.
            job = MapRenderingJob.objects.to_render()[0]
            self.dispatch(job)
        except IndexError:
            # No job available: sleep before polling again.  A Ctrl-C
            # received during the sleep terminates the daemon; note an
            # interrupt raised while dispatching is NOT caught here.
            try:
                time.sleep(self.frequency)
            except KeyboardInterrupt:
                break

    LOG.info("MapOSMatic rendering daemon terminating.")
def run(self):
    """Renders the given job, encapsulating all processing errors and
    exceptions.

    This does not affect the job entry in the database in any way. It's the
    responsibility of the caller to do maintain the job status in the
    database.

    Returns one of the RESULT_ constants.
    """
    LOG.info("Rendering job #%d '%s'..." % (self.job.id, self.job.maptitle))
    try:
        # No administrative city set: build the renderer from the explicit
        # bounding box stored on the job.
        if not self.job.administrative_city:
            bbox = BoundingBox(
                self.job.lat_upper_left,
                self.job.lon_upper_left,
                self.job.lat_bottom_right,
                self.job.lon_bottom_right,
            )
            renderer = OCitySMap(
                config_file=OCITYSMAP_CFG_PATH,
                map_areas_prefix=self.prefix,
                boundingbox=bbox,
                language=self.job.map_language,
            )
        else:
            # Administrative city given: let OCitySMap resolve the area
            # from the administrative boundary's OSM ID.
            renderer = OCitySMap(
                config_file=OCITYSMAP_CFG_PATH,
                map_areas_prefix=self.prefix,
                osmid=self.job.administrative_osmid,
                language=self.job.map_language,
            )
    except KeyboardInterrupt:
        self.result = RESULT_KEYBOARD_INTERRUPT
        LOG.info("Rendering of job #%d interrupted!" % self.job.id)
        return self.result
    # Python 2 syntax; `e` is unused, LOG.exception records the traceback.
    except Exception, e:
        self.result = RESULT_PREPARATION_EXCEPTION
        LOG.exception("Rendering of job #%d failed (exception occurred during"
                      " data preparation)!" % self.job.id)
        return self.result
def run(self):
    """Renders the given job, encapsulating all processing errors and
    exceptions.

    This does not affect the job entry in the database in any way. It's the
    responsibility of the caller to do maintain the job status in the
    database.

    Returns one of the RESULT_ constants.
    """
    LOG.info("Rendering job #%d '%s'..." % (self.job.id, self.job.maptitle))
    try:
        # Without an administrative city, render from the job's explicit
        # bounding box; otherwise resolve the area from the administrative
        # boundary's OSM ID.
        if not self.job.administrative_city:
            bbox = BoundingBox(self.job.lat_upper_left,
                               self.job.lon_upper_left,
                               self.job.lat_bottom_right,
                               self.job.lon_bottom_right)
            renderer = OCitySMap(config_file=OCITYSMAP_CFG_PATH,
                                 map_areas_prefix=self.prefix,
                                 boundingbox=bbox,
                                 language=self.job.map_language)
        else:
            renderer = OCitySMap(config_file=OCITYSMAP_CFG_PATH,
                                 map_areas_prefix=self.prefix,
                                 osmid=self.job.administrative_osmid,
                                 language=self.job.map_language)
    except KeyboardInterrupt:
        self.result = RESULT_KEYBOARD_INTERRUPT
        LOG.info("Rendering of job #%d interrupted!" % self.job.id)
        return self.result
    # Python 2 except syntax; `e` is unused, LOG.exception logs the
    # active traceback.
    except Exception, e:
        self.result = RESULT_PREPARATION_EXCEPTION
        LOG.exception(
            "Rendering of job #%d failed (exception occurred during"
            " data preparation)!" % self.job.id)
        return self.result
def __init__(self, frequency=_DEFAULT_POLL_FREQUENCY):
    """Create the rendering daemon.

    Args:
        frequency: polling interval in seconds, used by serve() between
            two checks for new jobs.
    """
    self.frequency = frequency
    LOG.info("MapOSMatic rendering daemon started.")
    # Presumably re-queues jobs left in an in-progress state by a previous
    # daemon run -- see rollback_orphaned_jobs() (defined elsewhere).
    self.rollback_orphaned_jobs()
def cleanup(self):
    """Run one iteration of the cleanup loop. A sorted list of files from the
    renderings directory is first created, oldest files last. Files are then
    pop()-ed out of the list and removed by cleanup_files() until we're back
    below the size threshold."""

    # Stat every candidate file in the renderings directory, skipping
    # dotfiles and thumbnail images.  (Python 2: map() returns a list.)
    files = map(
        lambda f: self.get_file_info(f),
        [
            os.path.join(RENDERING_RESULT_PATH, f)
            for f in os.listdir(RENDERING_RESULT_PATH)
            if not (f.startswith(".") or f.endswith(render.THUMBNAIL_SUFFIX))
        ],
    )

    # Compute the total size occupied by the renderings, and the actual 80%
    # threshold, in bytes.
    size = reduce(lambda x, y: x + y["size"], files, 0)
    threshold = 0.8 * RENDERING_RESULT_MAX_SIZE_GB * 1024 * 1024 * 1024

    # Stop here if we are below the threshold
    if size < threshold:
        return

    LOG.info(
        "%s consumed for a %s threshold. Cleaning..."
        % (self.get_formatted_value(size), self.get_formatted_value(threshold))
    )

    # Sort files by timestamp, oldest last, and start removing them by
    # pop()-ing the list.  (Python 2 cmp-style comparator: newest first.)
    files.sort(lambda x, y: cmp(y["time"], x["time"]))
    while size > threshold:
        if not len(files):
            LOG.error("No files to remove and still above threshold! "
                      "Something's wrong!")
            return

        # Oldest remaining file.
        f = files.pop()
        LOG.debug("Considering file %s..." % f["name"])

        job = MapRenderingJob.objects.get_by_filename(f["name"])
        if job:
            # Remove through the parent job so all its related files go
            # away together; `saved` is the number of bytes reclaimed.
            LOG.debug("Found matching parent job #%d." % job.id)
            removed, saved = job.remove_all_files()
            size -= saved
            if removed:
                LOG.info(
                    "Removed %d files for job #%d (%s)."
                    % (removed, job.id, self.get_formatted_details(saved, size, threshold))
                )
        else:
            # If we didn't find a parent job, it means this is an orphaned
            # file, we can safely remove it to get back some disk space.
            LOG.debug("No parent job found.")
            os.remove(f["path"])
            size -= f["size"]
            LOG.info(
                "Removed orphan file %s (%s)."
                % (f["name"], self.get_formatted_details(f["size"], size, threshold))
            )
LOG.debug("Found matching parent job #%d." % job.id) removed, saved = job.remove_all_files() size -= saved if removed: LOG.info( "Removed %d files for job #%d (%s)." % (removed, job.id, self.get_formatted_details(saved, size, threshold)) ) else: # If we didn't find a parent job, it means this is an orphaned # file, we can safely remove it to get back some disk space. LOG.debug("No parent job found.") os.remove(f["path"]) size -= f["size"] LOG.info( "Removed orphan file %s (%s)." % (f["name"], self.get_formatted_details(f["size"], size, threshold)) ) if __name__ == "__main__": if not os.path.exists(RENDERING_RESULT_PATH) or not os.path.isdir(RENDERING_RESULT_PATH): LOG.error("%s does not exist or is not a directory! " "Please use a valid RENDERING_RESULT_PATH.") sys.exit(1) cleaner = RenderingsGarbageCollector() daemon = MapOSMaticDaemon() cleaner.start() daemon.serve()
class JobRenderer(threading.Thread): """ A simple, blocking job rendered. It can be used as a thread, or directly in the main processing path of the caller if it chooses to call run() directly. """ def __init__(self, job, prefix): threading.Thread.__init__(self, name='renderer') self.job = job self.prefix = prefix self.result = None def __get_my_tid(self): if not self.isAlive(): raise threading.ThreadError("the thread is not active") # Do we have it cached? if hasattr(self, '__thread_id'): return self.__thread_id # If not, look for it for tid, tobj in threading._active.items(): if tobj is self: self.__thread_id = tid return self.__thread_id raise AssertionError("Could not resolve the thread's ID") def kill(self): LOG.debug("Killing job #%d's worker thread..." % self.job.id) res = ctypes.pythonapi.PyThreadState_SetAsyncExc( self.__get_my_tid(), ctypes.py_object(SystemExit)) if res == 0: raise ValueError("Invalid thread ID") elif res != 1: ctypes.pythonapi.PyThreadState_SetAsyncExc(self.__get_my_tid(), 0) raise SystemError("PyThreadState_SetAsync failed") def run(self): """Renders the given job, encapsulating all processing errors and exceptions. This does not affect the job entry in the database in any way. It's the responsibility of the caller to do maintain the job status in the database. Returns one of the RESULT_ constants. """ LOG.info("Rendering job #%d '%s'..." % (self.job.id, self.job.maptitle)) try: if not self.job.administrative_city: bbox = BoundingBox(self.job.lat_upper_left, self.job.lon_upper_left, self.job.lat_bottom_right, self.job.lon_bottom_right) renderer = OCitySMap(config_file=OCITYSMAP_CFG_PATH, map_areas_prefix=self.prefix, boundingbox=bbox, language=self.job.map_language) else: renderer = OCitySMap(config_file=OCITYSMAP_CFG_PATH, map_areas_prefix=self.prefix, osmid=self.job.administrative_osmid, language=self.job.map_language) except KeyboardInterrupt: self.result = RESULT_KEYBOARD_INTERRUPT LOG.info("Rendering of job #%d interrupted!" 
% self.job.id) return self.result except Exception, e: self.result = RESULT_PREPARATION_EXCEPTION LOG.exception( "Rendering of job #%d failed (exception occurred during" " data preparation)!" % self.job.id) return self.result prefix = os.path.join(RENDERING_RESULT_PATH, self.job.files_prefix()) try: # Render the map in all RENDERING_RESULT_FORMATS result = renderer.render_map_into_files(self.job.maptitle, prefix, RENDERING_RESULT_FORMATS, 'zoom:16') # Render the index in all RENDERING_RESULT_FORMATS, using the # same map size. renderer.render_index(self.job.maptitle, prefix, RENDERING_RESULT_FORMATS, result.width, result.height) # Create thumbnail if 'png' in RENDERING_RESULT_FORMATS: img = Image.open(prefix + '.png') img.thumbnail((200, 200), Image.ANTIALIAS) img.save(prefix + THUMBNAIL_SUFFIX) self.result = RESULT_SUCCESS LOG.info("Finished rendering of job #%d." % self.job.id) except KeyboardInterrupt: self.result = RESULT_KEYBOARD_INTERRUPT LOG.info("Rendering of job #%d interrupted!" % self.job.id) except Exception, e: self.result = RESULT_RENDERING_EXCEPTION LOG.warning(e) LOG.warning( "Rendering of job #%d failed (exception occurred during" " rendering)!" % self.job.id)
if removed: LOG.info( "Removed %d files for job #%d (%s)." % (removed, job.id, self.get_formatted_details(saved, size, threshold))) else: # If we didn't find a parent job, it means this is an orphaned # file, we can safely remove it to get back some disk space. LOG.debug("No parent job found.") os.remove(f['path']) size -= f['size'] LOG.info( "Removed orphan file %s (%s)." % (f['name'], self.get_formatted_details(f['size'], size, threshold))) if __name__ == '__main__': if (not os.path.exists(RENDERING_RESULT_PATH) or not os.path.isdir(RENDERING_RESULT_PATH)): LOG.error("%s does not exist or is not a directory! " "Please use a valid RENDERING_RESULT_PATH.") sys.exit(1) cleaner = RenderingsGarbageCollector() daemon = MapOSMaticDaemon() cleaner.start() daemon.serve()
def cleanup(self):
    """Run one cleanup pass over the renderings directory.

    Collects every rendering file (ignoring dotfiles and thumbnails); if
    their cumulated size exceeds 80% of RENDERING_RESULT_MAX_SIZE_GB,
    removes renderings oldest-first -- through their parent job when one
    exists, directly otherwise -- until usage drops below the threshold."""
    candidates = [os.path.join(RENDERING_RESULT_PATH, name)
                  for name in os.listdir(RENDERING_RESULT_PATH)
                  if not (name.startswith('.')
                          or name.endswith(render.THUMBNAIL_SUFFIX))]
    files = [self.get_file_info(path) for path in candidates]

    # Total size of the renderings and the 80% threshold, in bytes.
    size = sum([entry['size'] for entry in files])
    threshold = 0.8 * RENDERING_RESULT_MAX_SIZE_GB * 1024 * 1024 * 1024

    # Nothing to do while we remain below the threshold.
    if size < threshold:
        return

    LOG.info("%s consumed for a %s threshold. Cleaning..." %
             (self.get_formatted_value(size),
              self.get_formatted_value(threshold)))

    # Newest first, so pop() always hands us the oldest remaining file.
    files.sort(key=lambda entry: entry['time'], reverse=True)

    while size > threshold:
        if not files:
            LOG.error("No files to remove and still above threshold! "
                      "Something's wrong!")
            return

        entry = files.pop()
        LOG.debug("Considering file %s..." % entry['name'])

        job = MapRenderingJob.objects.get_by_filename(entry['name'])
        if job:
            # Delete through the parent job so all of its files go away
            # together; `saved` is the number of bytes reclaimed.
            LOG.debug("Found matching parent job #%d." % job.id)
            removed, saved = job.remove_all_files()
            size -= saved
            if removed:
                LOG.info(
                    "Removed %d files for job #%d (%s)."
                    % (removed, job.id,
                       self.get_formatted_details(saved, size,
                                                  threshold)))
        else:
            # Orphaned file (no parent job): remove it directly to get
            # back some disk space.
            LOG.debug("No parent job found.")
            os.remove(entry['path'])
            size -= entry['size']
            LOG.info(
                "Removed orphan file %s (%s)."
                % (entry['name'],
                   self.get_formatted_details(entry['size'], size,
                                              threshold)))