Example #1
def _parallel_process_vessels_from_list(vessel_list, process_func, lockserver_handle, *args):
  """
  Obtain locks on all of the nodes of vessels in vessel_list, get fresh vessel
  objects from the database, and then parallelize a call to process_func to
  process each vessel in vessel_list (passing the additional *args to
  process_func).
  """
  
  node_id_list = []
  for vessel in vessel_list:
    node_id = maindb.get_node_identifier_from_vessel(vessel)
    # Lock names must be unique, and there could be multiple vessels from the
    # same node in the vessel_list.
    if node_id not in node_id_list:
      node_id_list.append(node_id)

  # Lock the nodes that these vessels are on.
  lockserver.lock_multiple_nodes(lockserver_handle, node_id_list)
  try:
    # Get new vessel objects from the db now that we have node locks.
    new_vessel_list = []
    for vessel in vessel_list:
      node_id = maindb.get_node_identifier_from_vessel(vessel)
      new_vessel_list.append(maindb.get_vessel(node_id, vessel.name))
    # Have the list object the caller may still be using contain the actual
    # vessel objects we have processed. That is, we've just replaced the
    # caller's list's contents with new vessel objects for the same vessels.
    vessel_list[:] = new_vessel_list[:]
  
    return parallel.run_parallelized(vessel_list, process_func, *args)
    
  finally:
    # Unlock the nodes.
    lockserver.unlock_multiple_nodes(lockserver_handle, node_id_list)
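
The in-place slice assignment above (vessel_list[:] = new_vessel_list[:]) is what lets a caller keep using its original list object while its contents are swapped for the freshly loaded vessel records. A minimal, self-contained sketch of that pattern (illustration only, not SeattleGeni code; refresh_in_place and its arguments are hypothetical):

def refresh_in_place(items, fetch_fresh):
  # Replace the contents of `items` with fresh objects while keeping the same
  # list object, so any other reference to the list sees the update.
  items[:] = [fetch_fresh(item) for item in items]

caller_list = ["vessel_a", "vessel_b"]
alias = caller_list
refresh_in_place(caller_list, str.upper)
assert alias == ["VESSEL_A", "VESSEL_B"]  # the aliased reference sees the new contents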
Example #2
def sync_user_keys_of_vessels():
    """
  This function is started as separate thread. It continually checks whether
  there are vessels needing their user keys sync'd and initiates the user key
  sync as needed.
  """

    log.info("[sync_user_keys_of_vessels] thread started.")

    # Run forever.
    while True:

        try:

            # Sleep a few seconds for those times where we don't have any vessels to clean up.
            time.sleep(5)

            # We shouldn't be running the backend in production with
            # settings.DEBUG = True. Just in case, though, tell django to reset its
            # list of saved queries each time through the loop.
            if settings.DEBUG:
                django.db.reset_queries()

            # Get a list of vessels that need to have user keys sync'd. This doesn't
            # include nodes known to be inactive as we would just continue failing to
            # communicate with nodes that are down.
            vessellist = maindb.get_vessels_needing_user_key_sync()
            if len(vessellist) == 0:
                continue

            log.info("[sync_user_keys_of_vessels] " + str(len(vessellist)) +
                     " vessels to have user keys sync'd: " + str(vessellist))

            parallel_results = parallel.run_parallelized(
                vessellist, _sync_user_keys_of_single_vessel)

            if len(parallel_results["exception"]) > 0:
                for vessel, exception_message in parallel_results["exception"]:
                    log_message = "Unhandled exception during parallelized vessel user key sync: " + exception_message
                    log.critical(log_message)
                # Raise the last exception so that the admin gets an email.
                raise InternalError(log_message)

        except:
            message = "[sync_user_keys_of_vessels] Something very bad happened: " + traceback.format_exc(
            )
            log.critical(message)

            # Send an email to the addresses listed in settings.ADMINS
            if not settings.DEBUG:
                subject = "Critical SeattleGeni backend error"
                django.core.mail.mail_admins(subject, message)

                # Sleep for ten minutes to make sure we don't flood the admins with error
                # report emails.
                time.sleep(600)
Example #3
def sync_user_keys_of_vessels():
  """
  This function is started as a separate thread. It continually checks whether
  there are vessels needing their user keys sync'd and initiates the user key
  sync as needed.
  """

  log.info("[sync_user_keys_of_vessels] thread started.")

  # Run forever.
  while True:
    
    try:
      
      # Sleep a few seconds for those times where we don't have any vessels to clean up.
      time.sleep(5)
      
      # We shouldn't be running the backend in production with
      # settings.DEBUG = True. Just in case, though, tell django to reset its
      # list of saved queries each time through the loop.
      if settings.DEBUG:
        django.db.reset_queries()
      
      # Get a list of vessels that need to have user keys sync'd. This doesn't
      # include nodes known to be inactive as we would just continue failing to
      # communicate with nodes that are down.
      vessellist = maindb.get_vessels_needing_user_key_sync()
      if len(vessellist) == 0:
        continue
        
      log.info("[sync_user_keys_of_vessels] " + str(len(vessellist)) + 
               " vessels to have user keys sync'd: " + str(vessellist))
     
      parallel_results = parallel.run_parallelized(vessellist, _sync_user_keys_of_single_vessel)
     
      if len(parallel_results["exception"]) > 0:
        for vessel, exception_message in parallel_results["exception"]:
          log_message = "Unhandled exception during parallelized vessel user key sync: " + exception_message
          log.critical(log_message)
        # Raise the last exception so that the admin gets an email.
        raise InternalError(log_message)
        
    except:
      message = "[sync_user_keys_of_vessels] Something very bad happened: " + traceback.format_exc()
      log.critical(message)
      
      # Send an email to the addresses listed in settings.ADMINS
      if not settings.DEBUG:
        subject = "Critical SeattleGeni backend error"
        django.core.mail.mail_admins(subject, message)
        
        # Sleep for ten minutes to make sure we don't flood the admins with error
        # report emails.
        time.sleep(600)
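
Both copies of this loop rely on parallel.run_parallelized returning a dictionary whose "exception" entry is a list of (item, error message) pairs; that is the shape the error-handling branch iterates over. A simplified, single-threaded stand-in with the same assumed result shape (the real Seattle parallel module runs the calls in worker threads and may return additional keys):

def run_serialized(items, func, *args):
  # Same result shape the loops above consume, but without any threading.
  results = {"returned": [], "exception": []}
  for item in items:
    try:
      results["returned"].append((item, func(item, *args)))
    except Exception as e:
      results["exception"].append((item, str(e)))
  return results

def _fake_sync(vessel_name):
  # Hypothetical per-item function standing in for _sync_user_keys_of_single_vessel.
  if vessel_name == "bad":
    raise ValueError("node unreachable")
  return "ok"

results = run_serialized(["good", "bad"], _fake_sync)
assert results["exception"] == [("bad", "node unreachable")]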
Example #4
def cleanup_vessels():
  """
  This function is started as a separate thread. It continually checks whether
  there are vessels needing to be cleaned up and initiates cleanup as needed.
  """
  
  log.info("[cleanup_vessels] cleanup thread started.")

  # Start transaction management.
  django.db.transaction.enter_transaction_management()

  # Run forever.
  while True:
    
    try:
      
      # Sleep a few seconds for those times where we don't have any vessels to clean up.
      time.sleep(5)
      
      # We shouldn't be running the backend in production with
      # settings.DEBUG = True. Just in case, though, tell django to reset its
      # list of saved queries each time through the loop. Note that this is not
      # specific to the cleanup thread as other parts of the backend are using
      # the maindb, as well, so we're overloading the purpose of the cleanup
      # thread by doing this here. This is just a convenient place to do it.
      # See http://docs.djangoproject.com/en/dev/faq/models/#why-is-django-leaking-memory
      # for more info.
      if settings.DEBUG:
        django.db.reset_queries()
      
      # First, make it so that expired vessels are seen as dirty. We aren't
      # holding a lock on the nodes when we do this. It's possible that we do
      # this while someone else has a lock on the node. What would result?
      # I believe the worst result is that a user has their vessel marked as
      # dirty after they renewed in the case where they are renewing it just
      # as it expires (with some exceptionally bad timing involved). And, 
      # that's not really very bad: if the user is trying to renew at the
      # exact moment it expires, they're trying their luck with how fast their
      # request gets processed, anyway. In short, I don't think it's important
      # enough to either obtain locks to do this or to rewrite the code to
      # avoid any need for separately marking expired vessels as dirty rather
      # than just trying to process expired vessels directly in the code below.
      date_started = datetime.datetime.now()
      expired_list = maindb.mark_expired_vessels_as_dirty()
      if len(expired_list) > 0:
        log.info("[cleanup_vessels] " + str(len(expired_list)) + 
                 " expired vessels have been marked as dirty: " + str(expired_list))
        maindb.create_action_log_event("mark_expired_vessels_as_dirty", user=None, second_arg=None,
                                       third_arg=None, was_successful=True, message=None,
                                       date_started=date_started, vessel_list=expired_list)

      # Get a list of vessels to clean up. This doesn't include nodes known to
      # be inactive as we would just continue failing to communicate with nodes
      # that are down.
      cleanupvessellist = maindb.get_vessels_needing_cleanup()
      if len(cleanupvessellist) == 0:
        continue
        
      log.info("[cleanup_vessels] " + str(len(cleanupvessellist)) + " vessels to clean up: " + str(cleanupvessellist))
      
      parallel_results = parallel.run_parallelized(cleanupvessellist, _cleanup_single_vessel)
        
      if len(parallel_results["exception"]) > 0:
        for vessel, exception_message in parallel_results["exception"]:
          log_message = "Unhandled exception during parallelized vessel cleanup: " + exception_message
          log.critical(log_message)
        # Raise the last exception so that the admin gets an email.
        raise InternalError(log_message)  
    
    except:
      message = "[cleanup_vessels] Something very bad happened: " + traceback.format_exc()
      log.critical(message)
      
      # Send an email to the addresses listed in settings.ADMINS
      if not settings.DEBUG:
        subject = "Critical SeattleGeni backend error"
        django.core.mail.mail_admins(subject, message)
        
        # Sleep for ten minutes to make sure we don't flood the admins with error
        # report emails.
        time.sleep(600)
    finally:
      # Manually commit the transaction to prevent caching.
      django.db.transaction.commit()
Example #5
def cleanup_vessels():
    """
  This function is started as separate thread. It continually checks whether
  there are vessels needing to be cleaned up and initiates cleanup as needed.
  """

    log.info("[cleanup_vessels] cleanup thread started.")

    # Start transaction management.
    django.db.transaction.enter_transaction_management()

    # Run forever.
    while True:

        try:

            # Sleep a few seconds for those times where we don't have any vessels to clean up.
            time.sleep(5)

            # We shouldn't be running the backend in production with
            # settings.DEBUG = True. Just in case, though, tell django to reset its
            # list of saved queries each time through the loop. Note that this is not
            # specific to the cleanup thread as other parts of the backend are using
            # the maindb, as well, so we're overloading the purpose of the cleanup
            # thread by doing this here. This is just a convenient place to do it.
            # See http://docs.djangoproject.com/en/dev/faq/models/#why-is-django-leaking-memory
            # for more info.
            if settings.DEBUG:
                django.db.reset_queries()

            # First, make it so that expired vessels are seen as dirty. We aren't
            # holding a lock on the nodes when we do this. It's possible that we do
            # this while someone else has a lock on the node. What would result?
            # I believe the worst result is that a user has their vessel marked as
            # dirty after they renewed in the case where they are renewing it just
            # as it expires (with some exceptionally bad timing involved). And,
            # that's not really very bad: if the user is trying to renew at the
            # exact moment it expires, they're trying their luck with how fast their
            # request gets processed, anyway. In short, I don't think it's important
            # enough to either obtain locks to do this or to rewrite the code to
            # avoid any need for separately marking expired vessels as dirty rather
            # than just trying to process expired vessels directly in the code below.
            date_started = datetime.datetime.now()
            expired_list = maindb.mark_expired_vessels_as_dirty()
            if len(expired_list) > 0:
                log.info("[cleanup_vessels] " + str(len(expired_list)) +
                         " expired vessels have been marked as dirty: " +
                         str(expired_list))
                maindb.create_action_log_event("mark_expired_vessels_as_dirty",
                                               user=None,
                                               second_arg=None,
                                               third_arg=None,
                                               was_successful=True,
                                               message=None,
                                               date_started=date_started,
                                               vessel_list=expired_list)

            # Get a list of vessels to clean up. This doesn't include nodes known to
            # be inactive as we would just continue failing to communicate with nodes
            # that are down.
            cleanupvessellist = maindb.get_vessels_needing_cleanup()
            if len(cleanupvessellist) == 0:
                continue

            log.info("[cleanup_vessels] " + str(len(cleanupvessellist)) +
                     " vessels to clean up: " + str(cleanupvessellist))

            parallel_results = parallel.run_parallelized(
                cleanupvessellist, _cleanup_single_vessel)

            if len(parallel_results["exception"]) > 0:
                for vessel, exception_message in parallel_results["exception"]:
                    log_message = "Unhandled exception during parallelized vessel cleanup: " + exception_message
                    log.critical(log_message)
                # Raise the last exception so that the admin gets an email.
                raise InternalError(log_message)

        except:
            message = "[cleanup_vessels] Something very bad happened: " + traceback.format_exc(
            )
            log.critical(message)

            # Send an email to the addresses listed in settings.ADMINS
            if not settings.DEBUG:
                subject = "Critical SeattleGeni backend error"
                django.core.mail.mail_admins(subject, message)

                # Sleep for ten minutes to make sure we don't flood the admins with error
                # report emails.
                time.sleep(600)
        finally:
            # Manually commit the transaction to prevent caching.
            django.db.transaction.commit()
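
The catch-all branch in both cleanup listings follows the same "log it, mail the admins, then back off" discipline, so a persistent failure produces at most one error email every ten minutes rather than a flood. Stripped of the SeattleGeni specifics, the loop reduces to roughly this sketch (do_one_pass and notify_admins are hypothetical placeholders for the per-pass work and for django.core.mail.mail_admins):

import time
import traceback

def run_forever(do_one_pass, notify_admins, quiet_period=600):
  # Generic daemon loop: short pause between normal passes, long back-off after
  # an unexpected failure so that admins are not flooded with error emails.
  while True:
    try:
      time.sleep(5)
      do_one_pass()
    except:  # bare except, mirroring the listings above: this loop must never die
      notify_admins("Something very bad happened: " + traceback.format_exc())
      time.sleep(quiet_period)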