def do_logging_func_call(*args, **kwargs):
  """
  Wrapper that invokes func(*args, **kwargs) and records an action log event
  in the maindb for both the success and the failure path. On failure the
  original exception is re-raised after being logged.

  Keyword arguments are deliberately not logged: the interface calls this
  wrapper is applied to are assumed not to use them.
  """
  start_time = datetime.datetime.now()
  geniuser = None
  logged_vessels = []
  # Holds the stringified second/third positional arguments, when present
  # and when they aren't vessel lists.
  extra_args = [None, None]

  # We expect the first argument to always be a GeniUser at the moment;
  # the isinstance check is just in case things change.
  if args and isinstance(args[0], models.GeniUser):
    geniuser = args[0]

  # Up to two more positional arguments may follow the geniuser. Each is
  # either a vessel list (kept for the log event's vessel_list field) or
  # some other value we stringify for the log.
  for position in (1, 2):
    if len(args) > position:
      if _is_vessel_list(args[position]):
        logged_vessels = args[position]
      else:
        extra_args[position - 1] = str(args[position])

  try:
    result = func(*args, **kwargs)

    # A vessel list in the return value takes precedence over one that was
    # passed in as an argument.
    if _is_vessel_list(result):
      logged_vessels = result

    # Kept inside the try block on purpose: if logging the success event
    # itself fails, the except clause below records the failure.
    maindb.create_action_log_event(func.__name__, geniuser, extra_args[0],
                                   extra_args[1], True, None, start_time,
                                   logged_vessels)
    return result

  except Exception as e:
    maindb.create_action_log_event(func.__name__, geniuser, extra_args[0],
                                   extra_args[1], False, str(e), start_time,
                                   logged_vessels)
    raise
def cleanup_vessels():
  """
  This function is started as separate thread. It continually checks whether
  there are vessels needing to be cleaned up and initiates cleanup as needed.

  Each pass through the loop: marks expired vessels dirty (logging an action
  event when any were marked), then hands every vessel needing cleanup to
  _cleanup_single_vessel in parallel. Any failure is logged, emailed to the
  admins (when not in DEBUG mode), and the loop keeps running. Never returns.
  """

  log.info("[cleanup_vessels] cleanup thread started.")

  # Start a transaction management. Each iteration commits explicitly in the
  # finally clause below.
  django.db.transaction.enter_transaction_management()

  # Run forever.
  while True:

    try:
      # Sleep a few seconds for those times where we don't have any vessels to clean up.
      time.sleep(5)

      # We shouldn't be running the backend in production with
      # settings.DEBUG = True. Just in case, though, tell django to reset its
      # list of saved queries each time through the loop. Note that this is not
      # specific to the cleanup thread as other parts of the backend are using
      # the maindb, as well, so we're overloading the purpose of the cleanup
      # thread by doing this here. This is just a convenient place to do it.
      # See http://docs.djangoproject.com/en/dev/faq/models/#why-is-django-leaking-memory
      # for more info.
      if settings.DEBUG:
        django.db.reset_queries()

      # First, make it so that expired vessels are seen as dirty. We aren't
      # holding a lock on the nodes when we do this. It's possible that we do
      # this while someone else has a lock on the node. What would result?
      # I believe the worst result is that a user has their vessel marked as
      # dirty after they renewed in the case where they are renewing it just
      # as it expires (with some exceptionally bad timing involved). And,
      # that's not really very bad as if the user is trying to renew at the
      # exact moment it expires, their trying their luck with how fast their
      # request gets processed, anyways. In short, I don't think it's important
      # enough to either obtain locks to do this or to rewrite the code to
      # avoid any need for separately marking expired vessels as dirty rather
      # than just trying to process expired vessels directly in the code below.
      date_started = datetime.datetime.now()
      expired_list = maindb.mark_expired_vessels_as_dirty()
      if len(expired_list) > 0:
        log.info("[cleanup_vessels] " + str(len(expired_list)) + " expired vessels have been marked as dirty: " + str(expired_list))
        maindb.create_action_log_event("mark_expired_vessels_as_dirty",
                                       user=None, second_arg=None,
                                       third_arg=None, was_successful=True,
                                       message=None, date_started=date_started,
                                       vessel_list=expired_list)

      # Get a list of vessels to clean up. This doesn't include nodes known to
      # be inactive as we would just continue failing to communicate with nodes
      # that are down.
      cleanupvessellist = maindb.get_vessels_needing_cleanup()
      if len(cleanupvessellist) == 0:
        continue

      log.info("[cleanup_vessels] " + str(len(cleanupvessellist)) + " vessels to clean up: " + str(cleanupvessellist))

      parallel_results = parallel.run_parallelized(cleanupvessellist, _cleanup_single_vessel)

      if len(parallel_results["exception"]) > 0:
        # Log every worker failure, then raise once (for the last one) so the
        # except clause below emails the admins.
        for vessel, exception_message in parallel_results["exception"]:
          log_message = "Unhandled exception during parallelized vessel cleanup: " + exception_message
          log.critical(log_message)
        # Raise the last exceptions so that the admin gets an email.
        raise InternalError(log_message)

    except:
      # NOTE: deliberately a bare except — this daemon thread must survive
      # any failure, including ones that are not Exception subclasses.
      message = "[cleanup_vessels] Something very bad happened: " + traceback.format_exc()
      log.critical(message)

      # Send an email to the addresses listed in settings.ADMINS
      if not settings.DEBUG:
        subject = "Critical SeattleGeni backend error"
        django.core.mail.mail_admins(subject, message)

        # Sleep for ten minutes to make sure we don't flood the admins with error
        # report emails.
        time.sleep(600)

    finally:
      # Manually commit the transaction to prevent caching.
      django.db.transaction.commit()
def cleanup_vessels():
  """
  Body of the vessel-cleanup background thread. Loops forever: every pass
  marks expired vessels as dirty, then runs _cleanup_single_vessel in
  parallel over all vessels the maindb reports as needing cleanup. Failures
  are logged and, outside of DEBUG mode, emailed to the admins before the
  loop continues. Never returns.

  NOTE(review): this file appears to contain two identical definitions of
  cleanup_vessels — confirm whether one copy should be removed.
  """

  log.info("[cleanup_vessels] cleanup thread started.")

  # Transactions are managed manually; every loop iteration ends with an
  # explicit commit (see the finally clause) so this thread never serves
  # cached/stale query results or holds a long-lived transaction.
  django.db.transaction.enter_transaction_management()

  while True:

    try:
      # Avoid a busy loop during the stretches when nothing needs cleanup.
      time.sleep(5)

      # With settings.DEBUG on, django records every executed query, which
      # leaks memory in a long-running process. Production shouldn't run
      # with DEBUG, but clear the query log each pass just in case. See
      # http://docs.djangoproject.com/en/dev/faq/models/#why-is-django-leaking-memory
      if settings.DEBUG:
        django.db.reset_queries()

      # Flag expired vessels as dirty. No node locks are taken here; the
      # worst case is a vessel renewed at the exact instant of expiry still
      # being marked dirty, which is considered acceptable.
      pass_started = datetime.datetime.now()
      newly_dirty = maindb.mark_expired_vessels_as_dirty()

      if newly_dirty:
        log.info("[cleanup_vessels] %d expired vessels have been marked as dirty: %s"
                 % (len(newly_dirty), newly_dirty))
        maindb.create_action_log_event("mark_expired_vessels_as_dirty",
                                       user=None, second_arg=None,
                                       third_arg=None, was_successful=True,
                                       message=None, date_started=pass_started,
                                       vessel_list=newly_dirty)

      # Vessels on nodes known to be inactive are excluded by the maindb
      # query — contacting downed nodes would just fail repeatedly.
      dirty_vessels = maindb.get_vessels_needing_cleanup()
      if not dirty_vessels:
        continue

      log.info("[cleanup_vessels] %d vessels to clean up: %s"
               % (len(dirty_vessels), dirty_vessels))

      results = parallel.run_parallelized(dirty_vessels, _cleanup_single_vessel)

      failures = results["exception"]
      if failures:
        # Log every worker failure, then raise for the last one so the
        # except clause below gets the admins emailed.
        for vessel, failure_text in failures:
          errmsg = "Unhandled exception during parallelized vessel cleanup: " + failure_text
          log.critical(errmsg)
        raise InternalError(errmsg)

    except:
      # Deliberately bare: this daemon thread must survive anything raised,
      # including non-Exception errors.
      report = "[cleanup_vessels] Something very bad happened: " + traceback.format_exc()
      log.critical(report)

      # Notify the addresses listed in settings.ADMINS, then back off for
      # ten minutes so a persistent failure doesn't flood them with email.
      if not settings.DEBUG:
        subject = "Critical SeattleGeni backend error"
        django.core.mail.mail_admins(subject, report)
        time.sleep(600)

    finally:
      # Commit each pass to prevent query caching across iterations.
      django.db.transaction.commit()