def test_regenerate_user_key(self):
    
    pubkey = "1 2"
    privkey = "3 4 5"
    donor_key = "6 7"
    
    # Create a user who will be doing the acquiring.
    user = maindb.create_user("testuser", "password", "*****@*****.**", "affiliation", 
                              pubkey, privkey, donor_key)
    userport = user.usable_vessel_port

    vesselcount = 4
    
    # Have every vessel acquisition request to the backend succeed.
    calls_results = [True] * vesselcount
    mocklib.mock_backend_acquire_vessel(calls_results)
    
    testutil.create_nodes_on_different_subnets(vesselcount, [userport])

    # Acquire all vessels on behalf of this user.
    all_vessels_list = interface.acquire_vessels(user, vesselcount, 'rand')

    # Release 2 vessels.
    released_vessels_list = all_vessels_list[:2]
    kept_vessels_list = all_vessels_list[2:]
    interface.release_vessels(user, released_vessels_list)
    
    # Ensure all of the vessels are marked as having user keys in sync.
    for vessel in all_vessels_list:
      # Get a fresh vessel from the db.
      vessel = maindb.get_vessel(vessel.node.node_identifier, vessel.name)
      self.assertTrue(vessel.user_keys_in_sync)

    # We expect a single keypair to be generated through the keygen api (the
    # new user public and private keys).
    mocklib.mock_keygen_generate_keypair([("55 66", "77 88 99")])
    
    interface.change_user_keys(user, pubkey=None)
    
    # Get a new user object from the database.
    user = maindb.get_user(user.username)
    
    # Make sure the user's key changed.
    self.assertEqual(user.user_pubkey, "55 66")
    self.assertEqual(user.user_privkey, "77 88 99")
    
    # Make sure that all of the vessels the user still has access to (and no
    # other vessels) are marked as needing user keys to be sync'd.
    for vessel in kept_vessels_list:
      # Get a fresh vessel from the db.
      vessel = maindb.get_vessel(vessel.node.node_identifier, vessel.name)
      self.assertFalse(vessel.user_keys_in_sync)

    for vessel in released_vessels_list:
      # Get a fresh vessel from the db.
      vessel = maindb.get_vessel(vessel.node.node_identifier, vessel.name)
      self.assertTrue(vessel.user_keys_in_sync)
  def test_renew_vessels_dont_belong_to_user(self):
    
    # Create a user who will be doing the acquiring.
    user = maindb.create_user("testuser", "password", "*****@*****.**", "affiliation", "1 2", "2 2 2", "3 4")
    userport = user.usable_vessel_port
    
    # Create a second user.
    user2 = maindb.create_user("user2", "password", "*****@*****.**", "affiliation", "1 2", "2 2 2", "3 4")
    
    vesselcount = 4
    
    # Have every vessel acquisition request to the backend succeed.
    calls_results = [True] * vesselcount
    mocklib.mock_backend_acquire_vessel(calls_results)
    
    testutil.create_nodes_on_different_subnets(vesselcount, [userport])
    
    # Acquire all of the vessels the user can acquire.
    vessel_list = interface.acquire_vessels(user, vesselcount, 'rand')
    
    release_vessel = vessel_list[0]
    interface.release_vessels(user, [release_vessel])
    
    # Manually fiddle with one of the vessels to make it owned by user2.
    user2_vessel = vessel_list[1]
    user2_vessel.acquired_by_user = user2
    user2_vessel.save()
    
    # Try to renew all of the originally acquired vessels, including the one
    # that was released and the one now acquired by user2. We expect those to
    # just be ignored.
    interface.renew_vessels(user, vessel_list)
  
    # Get fresh vessel objects that reflect the renewal.
    remaining_vessels = interface.get_acquired_vessels(user)
    release_vessel = maindb.get_vessel(release_vessel.node.node_identifier, release_vessel.name)
    user2_vessel = maindb.get_vessel(user2_vessel.node.node_identifier, user2_vessel.name)
   
    now = datetime.datetime.now()
    timedelta_oneday = datetime.timedelta(days=1)
  
    # Ensure that the vessels the user still has were renewed, but that the
    # released vessel and user2's vessel were ignored (not renewed).
    for vessel in remaining_vessels:
      self.assertTrue(vessel.date_expires - now > timedelta_oneday)

    self.assertTrue(user2_vessel.date_expires - now < timedelta_oneday)
    
    self.assertEqual(release_vessel.date_expires, None)
def _parallel_process_vessels_from_list(vessel_list, process_func, lockserver_handle, *args):
  """
  Obtain locks on all of the nodes of vessels in vessel_list, get fresh vessel
  objects from the database, and then parallelize a call to process_func to
  process each vessel in vessel_list (passing the additional *args to
  process_func).
  """
  
  node_id_list = []
  for vessel in vessel_list:
    node_id = maindb.get_node_identifier_from_vessel(vessel)
    # Lock names must be unique, and there could be multiple vessels from the
    # same node in the vessel_list.
    if node_id not in node_id_list:
      node_id_list.append(node_id)

  # Lock the nodes that these vessels are on.
  lockserver.lock_multiple_nodes(lockserver_handle, node_id_list)
  try:
    # Get new vessel objects from the db now that we have node locks.
    new_vessel_list = []
    for vessel in vessel_list:
      node_id = maindb.get_node_identifier_from_vessel(vessel)
      new_vessel_list.append(maindb.get_vessel(node_id, vessel.name))
    # Have the list object the caller may still be using contain the actual
    # vessel objects we have processed. That is, we've just replaced the
    # caller's list's contents with new vessel objects for the same vessels.
    vessel_list[:] = new_vessel_list[:]
  
    return parallel.run_parallelized(vessel_list, process_func, *args)
    
  finally:
    # Unlock the nodes.
    lockserver.unlock_multiple_nodes(lockserver_handle, node_id_list)
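
# A minimal usage sketch, assuming this module's imports (lockserver, log).
# The worker `_example_process_vessel` and the extra "renewal pass" argument
# are illustrative only; real callers pass their own per-vessel functions.
def _example_process_vessel(vessel, note):
  # run_parallelized() invokes this once per fresh vessel object, forwarding
  # the extra *args given to _parallel_process_vessels_from_list.
  log.info("Processing vessel " + str(vessel) + " (" + note + ")")

def _example_process_all(vessel_list):
  # The caller owns the lockserver handle's lifetime; the helper above only
  # locks and unlocks the nodes with it.
  lockserver_handle = lockserver.create_lockserver_handle()
  try:
    return _parallel_process_vessels_from_list(
        vessel_list, _example_process_vessel, lockserver_handle, "renewal pass")
  finally:
    lockserver.destroy_lockserver_handle(lockserver_handle)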
def get_vessel_list(vesselhandle_list):
  """
  <Purpose>
    Convert a list of vesselhandles into a list of Vessel objects.
  <Arguments>
    vesselhandle_list
      A list of strings where each string is a vesselhandle of the format
      "nodeid:vesselname"
  <Exceptions>
    DoesNotExistError
      If a specified vessel does not exist.
    InvalidRequestError
      If any vesselhandle in the list is not in the correct format.
  <Side Effects>
    None
  <Returns>
    A list of Vessel objects.
  """
  assert_list_of_str(vesselhandle_list)
  
  vessel_list = []
  
  for vesselhandle in vesselhandle_list:
    # Split once and validate; a well-formed handle has exactly one ":".
    parts = vesselhandle.split(":")
    if len(parts) != 2:
      raise InvalidRequestError("Invalid vesselhandle: " + vesselhandle)

    (nodeid, vesselname) = parts
    # Raises DoesNotExistError if there is no such node/vessel.
    vessel = maindb.get_vessel(nodeid, vesselname)
    vessel_list.append(vessel)
    
  return vessel_list
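
# A short usage sketch; the handle strings below are hypothetical values in
# the "nodeid:vesselname" format described in the docstring above.
def _example_get_vessels():
  handles = ["examplenode1:v1", "examplenode1:v2"]  # hypothetical handles
  # Each handle resolves to a Vessel object; a malformed handle raises
  # InvalidRequestError and an unknown node/vessel raises DoesNotExistError.
  return get_vessel_list(handles)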
def _cleanup_single_vessel(vessel):
  """
  This function is passed by cleanup_vessels() as the function argument to
  run_parallelized().
  """
  
  # Requiring four round-trips with the lockserver (get handle, lock, unlock,
  # release handle) does seem wasteful, but if we really want to fix that then
  # I think the best thing to do would be to allow obtaining a lockhandle and
  # releasing a lockhandle to be done in the same calls as lock acquisition
  # and release.
  
  node_id = maindb.get_node_identifier_from_vessel(vessel)
  lockserver_handle = lockserver.create_lockserver_handle()
  
  # Lock the node that the vessel is on.
  lockserver.lock_node(lockserver_handle, node_id)
  try:
    # Get a new vessel object from the db in case it was modified in the db
    # before the lock was obtained.
    vessel = maindb.get_vessel(node_id, vessel.name)
    
    # Now that we have a lock on the node that this vessel is on, find out
    # if we should still clean up this vessel (e.g. maybe a node state
    # transition script moved the node to a new state and this vessel was
    # removed).
    needscleanup, reasonwhynot = maindb.does_vessel_need_cleanup(vessel)
    if not needscleanup:
      log.info("[_cleanup_single_vessel] Vessel " + str(vessel) + 
               " no longer needs cleanup: " + reasonwhynot)
      return
    
    nodeid = maindb.get_node_identifier_from_vessel(vessel)
    nodehandle = _get_node_handle_from_nodeid(nodeid)
    
    try:
      log.info("[_cleanup_single_vessel] About to ChangeUsers on vessel " + str(vessel))
      nodemanager.change_users(nodehandle, vessel.name, [''])
      log.info("[_cleanup_single_vessel] About to ResetVessel on vessel " + str(vessel))
      nodemanager.reset_vessel(nodehandle, vessel.name)
    except NodemanagerCommunicationError:
      # We don't pass this exception up. Maybe the node is offline now. At some
      # point, it will be marked in the database as offline (should we be doing
      # that here?). At that time, the dirty vessels on that node will not be
      # in the cleanup list anymore.
      log.info("[_cleanup_single_vessel] Failed to cleanup vessel " + 
               str(vessel) + ". " + traceback.format_exc())
      return
      
    # We only mark it as clean if no exception was raised when trying to
    # perform the above nodemanager operations.
    maindb.mark_vessel_as_clean(vessel)
  
    log.info("[_cleanup_single_vessel] Successfully cleaned up vessel " + str(vessel))

  finally:
    # Unlock the node.
    lockserver.unlock_node(lockserver_handle, node_id)
    lockserver.destroy_lockserver_handle(lockserver_handle)
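
# Hedged sketch of the driver described in the docstring above, assuming this
# module imports maindb and parallel: presumably cleanup_vessels() queries the
# db for dirty vessels and fans this worker out with run_parallelized(). The
# accessor name `get_vessels_needing_cleanup` is hypothetical, for
# illustration only.
def _cleanup_vessels_sketch():
  dirty_vessels = maindb.get_vessels_needing_cleanup()  # hypothetical accessor
  parallel.run_parallelized(dirty_vessels, _cleanup_single_vessel)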
def ban_user_and_remove_vessels(username):

  try:
    geniuser = maindb.get_user(username, allow_inactive=True)
  except DoesNotExistError:
    print "No such user: %s." % username
    sys.exit(1)

  # Lock the user.
  lockserver_handle = lockserver.create_lockserver_handle()
  lockserver.lock_user(lockserver_handle, geniuser.username)

  try:
    if geniuser.is_active:
      geniuser.is_active = False
      geniuser.save()
      print "This account has been set to inactive (banned)."
    else:
      print "This account is already inactive (banned)."

    acquired_vessels = maindb.get_acquired_vessels(geniuser)
    if not acquired_vessels:
      print "No acquired vessels to stop/remove access to."
    else:
      print "Vessels acquired by this user: %s" % acquired_vessels
      print "Indicating to the backend to release %s vessels." % len(acquired_vessels)
      vessels.release_vessels(lockserver_handle, geniuser, acquired_vessels)
      print "Release indicated. Monitoring db to see if the backend cleaned them up."
      while True:
        for vessel in acquired_vessels[:]:
          updated_vessel = maindb.get_vessel(vessel.node.node_identifier, vessel.name)
          if vessel.node.is_broken or not vessel.node.is_active:
            print "Node %s is broken or inactive, so backend won't contact it." % vessel.node
            acquired_vessels.remove(vessel)
            continue

          if updated_vessel.is_dirty:
            print "Vessel %s has not been cleaned up yet." % updated_vessel
          else:
            print "Vessel %s has been cleaned up." % updated_vessel
            acquired_vessels.remove(vessel)

        if not acquired_vessels:
          print "All vessels have been cleaned up."
          break
        else:
          print "%s vessels remain to be cleaned up." % len(acquired_vessels)

        print "Sleeping 10 seconds."
        time.sleep(10)

  finally:
    # Unlock the user.
    lockserver.unlock_user(lockserver_handle, geniuser.username)
    lockserver.destroy_lockserver_handle(lockserver_handle)
def stop_all_vessels_on_node(node_id):

  try:
    node = maindb.get_node(node_id)
  except DoesNotExistError:
    print "No such node"
    sys.exit(1)

  if not node.is_active:
    print "This node is marked as inactive, thus the backend will not try to clean up vessels."
    sys.exit(0)

  if node.is_broken:
    print "This node is marked as broken, thus the backend will not try to clean up vessels."
    sys.exit(0)

  vessels_on_node = maindb.get_vessels_on_node(node)
  if not vessels_on_node:
    print "No vessels on node."
    sys.exit(0)

  lockserver_handle = lockserver.create_lockserver_handle()
  try:
    print "Indicating to the backend to release/reset all %s vessels." % len(vessels_on_node)
    lockserver.lock_node(lockserver_handle, node_id)
    try:
      for vessel in vessels_on_node:
        maindb.record_released_vessel(vessel)
    finally:
      lockserver.unlock_node(lockserver_handle, node_id)

    print "Releases indicated. Monitoring db to see if the backend cleaned them up."
    while True:
      for vessel in vessels_on_node[:]:
        updated_vessel = maindb.get_vessel(node_id, vessel.name)
        if updated_vessel.is_dirty:
          print "Vessel %s has not been cleaned up yet." % updated_vessel
        else:
          print "Vessel %s has been cleaned up." % updated_vessel
          vessels_on_node.remove(vessel)
      if not vessels_on_node:
        print "All vessels have been cleaned up."
        break
      else:
        print "%s vessels remain to be cleaned up." % len(vessels_on_node)
      print "Sleeping 10 seconds."
      time.sleep(10)

  finally:
    lockserver.destroy_lockserver_handle(lockserver_handle)

  def test_set_user_key(self):

    pubkey = "1 2"
    privkey = "3 4 5"
    donor_key = "6 7"

    # Create a user who will be doing the acquiring.
    user = maindb.create_user("testuser", "password", "*****@*****.**",
                              "affiliation", pubkey, privkey, donor_key)
    userport = user.usable_vessel_port

    vesselcount = 4

    # Have every vessel acquisition request to the backend succeed.
    calls_results = [True] * vesselcount
    mocklib.mock_backend_acquire_vessel(calls_results)

    testutil.create_nodes_on_different_subnets(vesselcount, [userport])

    # Acquire all vessels on behalf of this user.
    all_vessels_list = interface.acquire_vessels(user, vesselcount, 'rand')

    # Release 2 vessels.
    released_vessels_list = all_vessels_list[:2]
    kept_vessels_list = all_vessels_list[2:]
    interface.release_vessels(user, released_vessels_list)

    # Ensure all of the vessels are marked as having user keys in sync.
    for vessel in all_vessels_list:
      # Get a fresh vessel from the db.
      vessel = maindb.get_vessel(vessel.node.node_identifier, vessel.name)
      self.assertTrue(vessel.user_keys_in_sync)

    # We expect no keys to be generated through the keygen api because the
    # new public key is provided directly.
    mocklib.mock_keygen_generate_keypair([])

    interface.change_user_keys(user, pubkey="55 66")

    # Get a new user object from the database.
    user = maindb.get_user(user.username)

    # Make sure the user's key changed and the stored private key was cleared.
    self.assertEqual(user.user_pubkey, "55 66")
    self.assertEqual(user.user_privkey, None)

    # Make sure that all of the vessels the user still has access to (and no
    # other vessels) are marked as needing user keys to be sync'd.
    for vessel in kept_vessels_list:
      # Get a fresh vessel from the db.
      vessel = maindb.get_vessel(vessel.node.node_identifier, vessel.name)
      self.assertFalse(vessel.user_keys_in_sync)

    for vessel in released_vessels_list:
      # Get a fresh vessel from the db.
      vessel = maindb.get_vessel(vessel.node.node_identifier, vessel.name)
      self.assertTrue(vessel.user_keys_in_sync)
def _sync_user_keys_of_single_vessel(vessel):
  """
  This function is passed by sync_user_keys_of_vessels() as the function
  argument to run_parallelized().
  """
  
  # Requiring four round-trips with the lockserver (get handle, lock, unlock,
  # release handle) does seem wasteful, but if we really want to fix that then
  # I think the best thing to do would be to allow obtaining a lockhandle and
  # releasing a lockhandle to be done in the same calls as lock acquisition
  # and release.
  
  node_id = maindb.get_node_identifier_from_vessel(vessel)
  lockserver_handle = lockserver.create_lockserver_handle()
  
  # Lock the node that the vessel is on.
  lockserver.lock_node(lockserver_handle, node_id)
  try:
    # Get a new vessel object from the db in case it was modified in the db
    # before the lock was obtained.
    vessel = maindb.get_vessel(node_id, vessel.name)
  
    # Now that we have a lock on the node that this vessel is on, find out
    # if we should still sync user keys on this vessel (e.g. maybe a node state
    # transition script moved the node to a new state and this vessel was
    # removed).
    needssync, reasonwhynot = maindb.does_vessel_need_user_key_sync(vessel)
    if not needssync:
      log.info("[_sync_user_keys_of_single_vessel] Vessel " + str(vessel) + 
               " no longer needs user key sync: " + reasonwhynot)
      return
    
    nodeid = maindb.get_node_identifier_from_vessel(vessel)
    nodehandle = _get_node_handle_from_nodeid(nodeid)
    
    # The list returned from get_users_with_access_to_vessel includes the user
    # who has acquired the vessel along with any other users that user has
    # given access to.
    user_list = maindb.get_users_with_access_to_vessel(vessel)
    
    key_list = []
    for user in user_list:
      key_list.append(user.user_pubkey)
      
    if len(key_list) == 0:
      raise InternalError("InternalError: Empty user key list for vessel " + str(vessel))
    
    try:
      log.info("[_sync_user_keys_of_single_vessel] About to ChangeUsers on vessel " + str(vessel))
      nodemanager.change_users(nodehandle, vessel.name, key_list)
    except NodemanagerCommunicationError:
      # We don't pass this exception up. Maybe the node is offline now. At some
      # point, it will be marked in the database as offline and won't show up in
      # our list of vessels to sync user keys of anymore.
      log.info("[_sync_user_keys_of_single_vessel] Failed to sync user keys of vessel " + 
               str(vessel) + ". " + traceback.format_exc())
      return
      
    # We only mark it as sync'd if no exception was raised when trying to perform
    # the above nodemanager operations.
    maindb.mark_vessel_as_not_needing_user_key_sync(vessel)
  
    log.info("[_sync_user_keys_of_single_vessel] Successfully sync'd user keys of vessel " + str(vessel))

  finally:
    # Unlock the node.
    lockserver.unlock_node(lockserver_handle, node_id)
    lockserver.destroy_lockserver_handle(lockserver_handle)