def cleanup_vessels():
    """
    Continually clean up vessels that need it.

    This function is started as a separate thread. It loops forever: each
    pass it marks expired vessels as dirty and initiates cleanup of any
    vessels needing it. It never returns.

    Side effects: writes to the main database via maindb, logs via log,
    and emails settings.ADMINS on unexpected errors (when not in DEBUG).

    Note: the original file defined this function twice with identical
    behavior; the duplicate (which silently shadowed the first definition)
    has been removed.
    """
    log.info("[cleanup_vessels] cleanup thread started.")

    # Start transaction management so we control exactly when commits happen
    # (see the commit in the finally clause below).
    django.db.transaction.enter_transaction_management()

    # Run forever.
    while True:

        try:
            # Sleep a few seconds for those times where we don't have any
            # vessels to clean up.
            time.sleep(5)

            # We shouldn't be running the backend in production with
            # settings.DEBUG = True. Just in case, though, tell django to reset its
            # list of saved queries each time through the loop. Note that this is not
            # specific to the cleanup thread as other parts of the backend are using
            # the maindb, as well, so we're overloading the purpose of the cleanup
            # thread by doing this here. This is just a convenient place to do it.
            # See http://docs.djangoproject.com/en/dev/faq/models/#why-is-django-leaking-memory
            # for more info.
            if settings.DEBUG:
                django.db.reset_queries()

            # First, make it so that expired vessels are seen as dirty. We aren't
            # holding a lock on the nodes when we do this. It's possible that we do
            # this while someone else has a lock on the node. What would result?
            # I believe the worst result is that a user has their vessel marked as
            # dirty after they renewed in the case where they are renewing it just
            # as it expires (with some exceptionally bad timing involved). And,
            # that's not really very bad as if the user is trying to renew at the
            # exact moment it expires, they're trying their luck with how fast their
            # request gets processed, anyways. In short, I don't think it's important
            # enough to either obtain locks to do this or to rewrite the code to
            # avoid any need for separately marking expired vessels as dirty rather
            # than just trying to process expired vessels directly in the code below.
            date_started = datetime.datetime.now()

            expired_list = maindb.mark_expired_vessels_as_dirty()
            if expired_list:
                log.info("[cleanup_vessels] " + str(len(expired_list)) +
                         " expired vessels have been marked as dirty: " + str(expired_list))
                maindb.create_action_log_event("mark_expired_vessels_as_dirty",
                                               user=None,
                                               second_arg=None,
                                               third_arg=None,
                                               was_successful=True,
                                               message=None,
                                               date_started=date_started,
                                               vessel_list=expired_list)

            # Get a list of vessels to clean up. This doesn't include nodes known to
            # be inactive as we would just continue failing to communicate with nodes
            # that are down.
            cleanupvessellist = maindb.get_vessels_needing_cleanup()
            if not cleanupvessellist:
                continue

            log.info("[cleanup_vessels] " + str(len(cleanupvessellist)) +
                     " vessels to clean up: " + str(cleanupvessellist))

            parallel_results = parallel.run_parallelized(cleanupvessellist,
                                                         _cleanup_single_vessel)

            if parallel_results["exception"]:
                for vessel, exception_message in parallel_results["exception"]:
                    log_message = ("Unhandled exception during parallelized vessel cleanup: " +
                                   exception_message)
                    log.critical(log_message)
                # Raise the last exception so that the admin gets an email.
                raise InternalError(log_message)

        except:
            # Deliberately bare: this is a long-running daemon thread, and any
            # uncaught exception would silently kill it. We log, notify the
            # admins, and keep running.
            message = "[cleanup_vessels] Something very bad happened: " + traceback.format_exc()
            log.critical(message)

            # Send an email to the addresses listed in settings.ADMINS
            if not settings.DEBUG:
                subject = "Critical SeattleGeni backend error"
                django.core.mail.mail_admins(subject, message)

                # Sleep for ten minutes to make sure we don't flood the admins
                # with error report emails.
                time.sleep(600)

        finally:
            # Manually commit the transaction to prevent caching.
            django.db.transaction.commit()
def _dirty_vessels():
    """Return the number of vessels currently awaiting cleanup."""
    pending = maindb.get_vessels_needing_cleanup()
    return len(pending)