# NOTE: The functions in this listing are vessel-management helpers from the
# Seattle Clearinghouse (seattlegeni) codebase. They rely on the imports below;
# the seattlegeni.* import paths are an assumption based on that codebase,
# since the original listing does not show them.
import datetime
import traceback

from seattlegeni.common.api import backend
from seattlegeni.common.api import lockserver
from seattlegeni.common.api import maindb
from seattlegeni.common.api import nodemanager
# Assumed to provide UnableToAcquireResourcesError, InternalError,
# NodemanagerCommunicationError, etc.
from seattlegeni.common.exceptions import *
from seattlegeni.common.util import log
from seattlegeni.common.util import parallel


def _parallel_process_vessels_from_list(vessel_list, process_func, lockserver_handle, *args):
  """
  Obtain locks on all of the nodes of vessels in vessel_list, get fresh vessel
  objects from the database, and then parallelize a call to process_func to
  process each vessel in vessel_list (passing the additional *args to
  process_func).
  """
  
  node_id_list = []
  for vessel in vessel_list:
    node_id = maindb.get_node_identifier_from_vessel(vessel)
    # Lock names must be unique, and there could be multiple vessels from the
    # same node in the vessel_list.
    if node_id not in node_id_list:
      node_id_list.append(node_id)

  # Lock the nodes that these vessels are on.
  lockserver.lock_multiple_nodes(lockserver_handle, node_id_list)
  try:
    # Get new vessel objects from the db now that we have node locks.
    new_vessel_list = []
    for vessel in vessel_list:
      node_id = maindb.get_node_identifier_from_vessel(vessel)
      new_vessel_list.append(maindb.get_vessel(node_id, vessel.name))
    # Have the list object the caller may still be using contain the actual
    # vessel objects we have processed. That is, we've just replaced the
    # caller's list's contents with new vessel objects for the same vessels.
    vessel_list[:] = new_vessel_list[:]
  
    return parallel.run_parallelized(vessel_list, process_func, *args)
    
  finally:
    # Unlock the nodes.
    lockserver.unlock_multiple_nodes(lockserver_handle, node_id_list)
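

# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of how a caller might drive _parallel_process_vessels_from_list:
# lock the vessels' nodes once and run _do_acquire_vessel (defined further
# below) on each vessel in parallel. The function name and the way the
# lockserver handle is managed here are assumptions for illustration, not the
# original public acquisition code.
def _example_acquire_vessels_for_user(geniuser, vessel_list):
  lockserver_handle = lockserver.create_lockserver_handle()
  try:
    # Each call is effectively _do_acquire_vessel(vessel, geniuser), made while
    # the relevant node locks are held.
    return _parallel_process_vessels_from_list(vessel_list, _do_acquire_vessel,
                                               lockserver_handle, geniuser)
  finally:
    lockserver.destroy_lockserver_handle(lockserver_handle)
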
def _cleanup_single_vessel(vessel):
  """
  This function is passed by cleanup_vessels() as the function argument to
  run_parallelized().
  """
  
  # Requiring four round-trips with the lockserver (get handle, lock, unlock,
  # release handle) does seem wasteful. If we really want to fix that, the best
  # approach would probably be to allow obtaining and releasing a lockhandle in
  # the same calls as lock acquisition and release.
  
  node_id = maindb.get_node_identifier_from_vessel(vessel)
  lockserver_handle = lockserver.create_lockserver_handle()
  
  # Lock the node that the vessel is on.
  lockserver.lock_node(lockserver_handle, node_id)
  try:
    # Get a new vessel object from the db in case it was modified in the db
    # before the lock was obtained.
    vessel = maindb.get_vessel(node_id, vessel.name)
    
    # Now that we have a lock on the node that this vessel is on, find out
    # if we should still clean up this vessel (e.g. maybe a node state
    # transition script moved the node to a new state and this vessel was
    # removed).
    needscleanup, reasonwhynot = maindb.does_vessel_need_cleanup(vessel)
    if not needscleanup:
      log.info("[_cleanup_single_vessel] Vessel " + str(vessel) + 
               " no longer needs cleanup: " + reasonwhynot)
      return
    
    nodeid = maindb.get_node_identifier_from_vessel(vessel)
    nodehandle = _get_node_handle_from_nodeid(nodeid)
    
    try:
      log.info("[_cleanup_single_vessel] About to ChangeUsers on vessel " + str(vessel))
      nodemanager.change_users(nodehandle, vessel.name, [''])
      log.info("[_cleanup_single_vessel] About to ResetVessel on vessel " + str(vessel))
      nodemanager.reset_vessel(nodehandle, vessel.name)
    except NodemanagerCommunicationError:
      # We don't pass this exception up. Maybe the node is offline now. At some
      # point, it will be marked in the database as offline (should we be doing
      # that here?). At that time, the dirty vessels on that node will not be
      # in the cleanup list anymore.
      log.info("[_cleanup_single_vessel] Failed to cleanup vessel " + 
               str(vessel) + ". " + traceback.format_exc())
      return
      
    # We only mark it as clean if no exception was raised when trying to
    # perform the above nodemanager operations.
    maindb.mark_vessel_as_clean(vessel)
  
    log.info("[_cleanup_single_vessel] Successfully cleaned up vessel " + str(vessel))

  finally:
    # Unlock the node.
    lockserver.unlock_node(lockserver_handle, node_id)
    lockserver.destroy_lockserver_handle(lockserver_handle)
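
# --- Illustrative sketch (not part of the original module) ---
# The create-handle / lock-node / unlock-node / destroy-handle pattern above is
# repeated by every per-vessel helper in this file. A small context manager,
# using only the lockserver calls already shown above, could factor it out.
# This is a sketch of a possible refactoring, not existing code.
import contextlib  # only needed for this sketch

@contextlib.contextmanager
def _example_locked_node(node_id):
  lockserver_handle = lockserver.create_lockserver_handle()
  lockserver.lock_node(lockserver_handle, node_id)
  try:
    yield
  finally:
    lockserver.unlock_node(lockserver_handle, node_id)
    lockserver.destroy_lockserver_handle(lockserver_handle)

# Usage sketch:
#   with _example_locked_node(node_id):
#     vessel = maindb.get_vessel(node_id, vessel.name)
#     ...
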
def _do_acquire_vessel(vessel, geniuser):
  """
  Perform the actual acquisition of the vessel by the user (through the
  backend) and update the database accordingly if the vessel is successfully
  acquired.
  
  This gets called in parallel (via run_parallelized) after a node lock has
  already been obtained for the vessel.

  When an UnableToAcquireResourcesError is raised, the exception message
  will contain the string "UnableToAcquireResourcesError" so that it can be
  seen in the results of a call to repy's parallelization function.  
  """
  
  node_id = maindb.get_node_identifier_from_vessel(vessel)
  
  if vessel.acquired_by_user is not None:
    message = "Vessel already acquired once the node lock was obtained."
    raise UnableToAcquireResourcesError("UnableToAcquireResourcesError: " + message)
  
  node = maindb.get_node(node_id)
  if node.is_active is False:
    message = "Vessel's node is no longer active once the node lock was obtained."
    raise UnableToAcquireResourcesError("UnableToAcquireResourcesError: " + message)
  
  # This will raise an UnableToAcquireResourcesError if it fails (e.g. if
  # the node is down). We want to allow the exception to be passed up to
  # the caller.
  try:
    backend.acquire_vessel(geniuser, vessel)
  except UnableToAcquireResourcesError as e:
    raise UnableToAcquireResourcesError("UnableToAcquireResourcesError: " + str(e))
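
# --- Illustrative sketch (not part of the original module) ---
# The "UnableToAcquireResourcesError: ..." prefix above exists so that a caller
# looking at the results of the parallelized run can tell acquisition failures
# apart from other errors. The per-vessel mapping of exception strings assumed
# below is for illustration only; the actual return format of
# parallel.run_parallelized() is not shown in this listing.
def _example_report_acquisition_failures(exception_string_by_vessel):
  failed_vessels = []
  for vessel, exception_string in exception_string_by_vessel.items():
    if "UnableToAcquireResourcesError" in exception_string:
      failed_vessels.append(vessel)
      log.info("Could not acquire vessel " + str(vessel) + ": " + exception_string)
  return failed_vessels
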
def get_vessel_infodict_list(vessel_list):
  """
  <Purpose>
    Convert a list of Vessel objects into a list of vessel infodicts.
    An "infodict" is a dictionary of vessel information that contains data
    which is safe for public display.

    This function needs to return a list of dictionaries with a minimum of the
    following, according to https://seattle.poly.edu/wiki/SeattleGeniApi:
      {'node_ip':node_ip, 'node_port':node_port, 'vessel_id':vessel_id,
       'node_id':node_id, 'handle':handle}
  <Arguments>
    vessel_list
      A list of Vessel objects.
  <Exceptions>
    None
  <Side Effects>
    None
  <Returns>
    A list of vessel infodicts.
  """
  infodict_list = []

  for vessel in vessel_list:
    vessel_info = {}

    vessel_info["node_id"] = maindb.get_node_identifier_from_vessel(vessel)
    node = maindb.get_node(vessel_info["node_id"])

    vessel_info["node_ip"] = node.last_known_ip
    vessel_info["node_port"] = node.last_known_port
    vessel_info["vessel_id"] = vessel.name

    vessel_info["handle"] = vessel_info["node_id"] + ":" + vessel.name

    vessel_info["is_active"] = node.is_active

    expires_in_timedelta = vessel.date_expires - datetime.datetime.now()
    # The timedelta object stores information in two parts: days and seconds.
    vessel_info["expires_in_seconds"] = ((expires_in_timedelta.days * 3600 * 24)
                                         + expires_in_timedelta.seconds)

    infodict_list.append(vessel_info)

  return infodict_list
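
# --- Illustrative example (not part of the original module) ---
# The minimum infodict shape promised by the docstring above, with hypothetical
# example values. The is_active and expires_in_seconds keys are also populated
# by get_vessel_infodict_list() even though they are not part of the documented
# minimum.
_EXAMPLE_VESSEL_INFODICT = {
  "node_id": "example_node_identifier",
  "node_ip": "203.0.113.7",
  "node_port": 1224,
  "vessel_id": "v3",
  "handle": "example_node_identifier:v3",
  "is_active": True,
  "expires_in_seconds": 3600,
}
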
def _sync_user_keys_of_single_vessel(vessel):
  """
  This function is passed by sync_user_keys_of_vessels() as the function
  argument to run_parallelized().
  """
  
  # Requiring four round-trips with the lockserver (get handle, lock, unlock,
  # release handle) does seem wasteful. If we really want to fix that, the best
  # approach would probably be to allow obtaining and releasing a lockhandle in
  # the same calls as lock acquisition and release.
  
  node_id = maindb.get_node_identifier_from_vessel(vessel)
  lockserver_handle = lockserver.create_lockserver_handle()
  
  # Lock the node that the vessel is on.
  lockserver.lock_node(lockserver_handle, node_id)
  try:
    # Get a new vessel object from the db in case it was modified in the db
    # before the lock was obtained.
    vessel = maindb.get_vessel(node_id, vessel.name)
  
    # Now that we have a lock on the node that this vessel is on, find out
    # if we should still sync user keys on this vessel (e.g. maybe a node state
    # transition script moved the node to a new state and this vessel was
    # removed).
    needssync, reasonwhynot = maindb.does_vessel_need_user_key_sync(vessel)
    if not needssync:
      log.info("[_sync_user_keys_of_single_vessel] Vessel " + str(vessel) + 
               " no longer needs user key sync: " + reasonwhynot)
      return
    
    nodeid = maindb.get_node_identifier_from_vessel(vessel)
    nodehandle = _get_node_handle_from_nodeid(nodeid)
    
    # The list returned from get_users_with_access_to_vessel includes the user
    # who has acquired the vessel along with any other users they have given
    # access to.
    user_list = maindb.get_users_with_access_to_vessel(vessel)
    
    key_list = []
    for user in user_list:
      key_list.append(user.user_pubkey)
      
    if len(key_list) == 0:
      raise InternalError("InternalError: Empty user key list for vessel " + str(vessel))
    
    try:
      log.info("[_sync_user_keys_of_single_vessel] About to ChangeUsers on vessel " + str(vessel))
      nodemanager.change_users(nodehandle, vessel.name, key_list)
    except NodemanagerCommunicationError:
      # We don't pass this exception up. Maybe the node is offline now. At some
      # point, it will be marked in the database as offline and won't show up in
      # our list of vessels to sync user keys of anymore.
      log.info("[_sync_user_keys_of_single_vessel] Failed to sync user keys of vessel " + 
               str(vessel) + ". " + traceback.format_exc())
      return
      
    # We only mark it as sync'd if no exception was raised when trying to perform
    # the above nodemanager operations.
    maindb.mark_vessel_as_not_needing_user_key_sync(vessel)
  
    log.info("[_sync_user_keys_of_single_vessel] Successfully sync'd user keys of vessel " + str(vessel))

  finally:
    # Unlock the node.
    lockserver.unlock_node(lockserver_handle, node_id)
    lockserver.destroy_lockserver_handle(lockserver_handle)
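
# --- Illustrative sketch (not part of the original module) ---
# The docstring above says _sync_user_keys_of_single_vessel is handed to
# run_parallelized() by sync_user_keys_of_vessels(). A minimal sketch of such a
# caller, assuming the vessel list is supplied by the caller (the real function,
# and however it obtains its vessel list, is not shown in this listing):
def _example_sync_user_keys_of_vessels(vessel_list):
  return parallel.run_parallelized(vessel_list, _sync_user_keys_of_single_vessel)
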