# NOTE: The imports below are reconstructed as an assumption; these snippets
# come from the Seattle Clearinghouse (SeattleGENI) backend, and the exact
# module paths may differ. Helpers referenced but not shown here (e.g.
# _get_node_handle_from_nodeid, _report_node_problem, _mark_node_broken, the
# rsa module, and the statekeys dict) are defined elsewhere in the original
# modules.
import sys
import time
import traceback

from seattlegeni.common import log
from seattlegeni.common.api import lockserver
from seattlegeni.common.api import maindb
from seattlegeni.common.api import nodemanager
from seattlegeni.common.exceptions import *


def _cleanup_single_vessel(vessel):
  """
  This function is passed by cleanup_vessels() as the function argument to
  run_parallelized().
  """
  
  # Requiring four round-trips with the lockserver (get handle, lock, unlock,
  # release handle) does seem wasteful. If we really want to fix that, the
  # best approach would be to allow obtaining and releasing a lockhandle in
  # the same calls as lock acquisition and release. (The _locked_node sketch
  # after this function illustrates a caller-side workaround.)
  
  node_id = maindb.get_node_identifier_from_vessel(vessel)
  lockserver_handle = lockserver.create_lockserver_handle()
  
  # Lock the node that the vessel is on.
  lockserver.lock_node(lockserver_handle, node_id)
  try:
    # Get a new vessel object from the db in case it was modified in the db
    # before the lock was obtained.
    vessel = maindb.get_vessel(node_id, vessel.name)
    
    # Now that we have a lock on the node that this vessel is on, find out
    # if we should still clean up this vessel (e.g. maybe a node state
    # transition script moved the node to a new state and this vessel was
    # removed).
    needscleanup, reasonwhynot = maindb.does_vessel_need_cleanup(vessel)
    if not needscleanup:
      log.info("[_cleanup_single_vessel] Vessel " + str(vessel) + 
               " no longer needs cleanup: " + reasonwhynot)
      return
    
    nodehandle = _get_node_handle_from_nodeid(node_id)
    
    try:
      log.info("[_cleanup_single_vessel] About to ChangeUsers on vessel " + str(vessel))
      nodemanager.change_users(nodehandle, vessel.name, [''])
      log.info("[_cleanup_single_vessel] About to ResetVessel on vessel " + str(vessel))
      nodemanager.reset_vessel(nodehandle, vessel.name)
    except NodemanagerCommunicationError:
      # We don't pass this exception up. Maybe the node is offline now. At some
      # point, it will be marked in the database as offline (should we be doing
      # that here?). At that time, the dirty vessels on that node will not be
      # in the cleanup list anymore.
      log.info("[_cleanup_single_vessel] Failed to cleanup vessel " + 
               str(vessel) + ". " + traceback.format_exc())
      return
      
    # We only mark it as clean if no exception was raised when trying to
    # perform the above nodemanager operations.
    maindb.mark_vessel_as_clean(vessel)
  
    log.info("[_cleanup_single_vessel] Successfully cleaned up vessel " + str(vessel))

  finally:
    # Unlock the node.
    lockserver.unlock_node(lockserver_handle, node_id)
    lockserver.destroy_lockserver_handle(lockserver_handle)
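
# The comment above notes that four lockserver round-trips per vessel is
# wasteful. A minimal caller-side sketch, assuming no lockserver API changes:
# wrap the handle/lock lifecycle in a context manager so the four calls are
# centralized and impossible to mismatch. This is an illustration, not part
# of the original module.
import contextlib

@contextlib.contextmanager
def _locked_node(node_id):
  """Yield a lockserver handle while holding the lock on node_id."""
  lockserver_handle = lockserver.create_lockserver_handle()
  try:
    lockserver.lock_node(lockserver_handle, node_id)
    try:
      yield lockserver_handle
    finally:
      lockserver.unlock_node(lockserver_handle, node_id)
  finally:
    lockserver.destroy_lockserver_handle(lockserver_handle)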
def acquire_node_lock(nodeID):
  """ 
  Create a lockserver handle and acquire a lock
  and return the lockserver handle
  """
  # Initialize a lock.
  lockserver_handle = lockserver.create_lockserver_handle()
  log("Created lockserver_handle for use on node: "+nodeID)
  # Acquire a lock for the node.
  lockserver.lock_node(lockserver_handle, nodeID)
  log("Acquired node lock for nodeID: "+nodeID)
 
  return lockserver_handle
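
# A hypothetical companion helper, sketched here for symmetry with
# acquire_node_lock(); the original script may name or structure this
# differently.
def release_node_lock(lockserver_handle, nodeID):
  """
  Release the lock acquired by acquire_node_lock() and destroy the handle.
  """
  lockserver.unlock_node(lockserver_handle, nodeID)
  log.info("Released node lock for nodeID: " + nodeID)
  lockserver.destroy_lockserver_handle(lockserver_handle)
  log.info("Destroyed lockserver_handle for node: " + nodeID)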
def stop_all_vessels_on_node(node_id):
  """
  Ask the backend to release/reset every vessel on the given node, then
  poll the database until the backend has cleaned them all up.
  """

  try:
    node = maindb.get_node(node_id)
  except DoesNotExistError:
    print "No such node"
    sys.exit(1)

  if not node.is_active:
    print "This node is marked as inactive, thus the backend will not try to clean up vessels."
    sys.exit(0)

  if node.is_broken:
    print "This node is marked as broken, thus the backend will not try to clean up vessels."
    sys.exit(0)

  vessels_on_node = maindb.get_vessels_on_node(node)
  if not vessels_on_node:
    print "No vessels on node."
    sys.exit(0)

  lockserver_handle = lockserver.create_lockserver_handle()
  try:
    print "Indicating to the backend to release/reset all %s vessels." % len(vessels_on_node)
    lockserver.lock_node(lockserver_handle, node_id)
    try:
      for vessel in vessels_on_node:
        maindb.record_released_vessel(vessel)
    finally:
      lockserver.unlock_node(lockserver_handle, node_id)

    print "Releases indicated. Monitoring db to see if the backend cleaned them up."
    while True:
      for vessel in vessels_on_node[:]:
        updated_vessel = maindb.get_vessel(node_id, vessel.name)
        if updated_vessel.is_dirty:
          print "Vessel %s has not been cleaned up yet." % updated_vessel
        else:
          print "Vessel %s has been cleaned up." % updated_vessel
          vessels_on_node.remove(vessel)
      if not vessels_on_node:
        print "All vessels have been cleaned up."
        break
      else:
        print "%s vessels remain to be cleaned up." % len(vessels_on_node)
      print "Sleeping 10 seconds."
      time.sleep(10)

  finally:
    lockserver.destroy_lockserver_handle(lockserver_handle)
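
# A minimal usage sketch, assuming this function lives in a standalone admin
# script invoked with the node identifier as its only argument.
if __name__ == "__main__":
  if len(sys.argv) != 2:
    print "Usage: python stop_all_vessels_on_node.py <node_id>"
    sys.exit(1)
  stop_all_vessels_on_node(sys.argv[1])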
def _sync_user_keys_of_single_vessel(vessel):
  """
  This function is passed by sync_user_keys_of_vessels() as the function
  argument to run_parallelized().
  """
  
  # Requiring four round-trips with the lockserver (get handle, lock, unlock,
  # release handle) does seem wasteful. If we really want to fix that, the
  # best approach would be to allow obtaining and releasing a lockhandle in
  # the same calls as lock acquisition and release. (The _locked_node sketch
  # above illustrates a caller-side workaround.)
  
  node_id = maindb.get_node_identifier_from_vessel(vessel)
  lockserver_handle = lockserver.create_lockserver_handle()
  
  # Lock the node that the vessel is on.
  lockserver.lock_node(lockserver_handle, node_id)
  try:
    # Get a new vessel object from the db in case it was modified in the db
    # before the lock was obtained.
    vessel = maindb.get_vessel(node_id, vessel.name)
  
    # Now that we have a lock on the node that this vessel is on, find out
    # if we should still sync user keys on this vessel (e.g. maybe a node state
    # transition script moved the node to a new state and this vessel was
    # removed).
    needssync, reasonwhynot = maindb.does_vessel_need_user_key_sync(vessel)
    if not needssync:
      log.info("[_sync_user_keys_of_single_vessel] Vessel " + str(vessel) + 
               " no longer needs user key sync: " + reasonwhynot)
      return
    
    nodehandle = _get_node_handle_from_nodeid(node_id)
    
    # The list returned from get_users_with_access_to_vessel includes the key of
    # the user who has acquired the vessel along with any other users they have
    # given access to.
    user_list = maindb.get_users_with_access_to_vessel(vessel)
    
    key_list = []
    for user in user_list:
      key_list.append(user.user_pubkey)
      
    if len(key_list) == 0:
      raise InternalError("Empty user key list for vessel " + str(vessel))
    
    try:
      log.info("[_sync_user_keys_of_single_vessel] About to ChangeUsers on vessel " + str(vessel))
      nodemanager.change_users(nodehandle, vessel.name, key_list)
    except NodemanagerCommunicationError:
      # We don't pass this exception up. Maybe the node is offline now. At some
      # point, it will be marked in the database as offline and won't show up in
      # our list of vessels to sync user keys of anymore.
      log.info("[_sync_user_keys_of_single_vessel] Failed to sync user keys of vessel " + 
               str(vessel) + ". " + traceback.format_exc())
      return
      
    # We only mark it as sync'd if no exception was raised when trying to perform
    # the above nodemanager operations.
    maindb.mark_vessel_as_not_needing_user_key_sync(vessel)
  
    log.info("[_sync_user_keys_of_single_vessel] Successfully sync'd user keys of vessel " + str(vessel))

  finally:
    # Unlock the node.
    lockserver.unlock_node(lockserver_handle, node_id)
    lockserver.destroy_lockserver_handle(lockserver_handle)
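
# The docstrings above refer to run_parallelized(), whose signature isn't
# shown in these snippets. As an assumption-labeled illustration of the
# calling pattern, here is a stand-in driver using a standard-library thread
# pool: each vessel is handed to the per-vessel worker independently, and
# each worker performs its own node locking and error handling.
from multiprocessing.dummy import Pool as ThreadPool

def _process_vessels_in_parallel(vessel_list, single_vessel_func, num_threads=10):
  """Apply single_vessel_func (e.g. _cleanup_single_vessel) to each vessel."""
  pool = ThreadPool(num_threads)
  try:
    pool.map(single_vessel_func, vessel_list)
  finally:
    pool.close()
    pool.join()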
def check_node(node, readonly=True, lockserver_handle=None):
  """
  <Purpose>
    Check a node for problems. This will try to contact the node and will
    compare the information retrieved from the node to the information we
    have in our database. It will log and collect information about any
    problems found, and the problem information can then be retrieved
    programmatically.
  <Arguments>
    node
      The Node object of the node to be checked.
    readonly
      False if the function should mark the node in the database as inactive
      or broken (and vessels released) when appropriate, True if it should
      never change anything in the database. Default is True.
    lockserver_handle
      If an existing lockserver handle should be used for lock acquisitions,
      it should be provided here. Otherwise, a new lockserver handle will
      be created and used for the duration of this function call.
      Note: no locking is done if readonly is True. That is, if there is
      no reason to lock a node, there is no reason to provide a
      lockserver_handle.
  <Exceptions>
    None
  <Side Effects>
    If readonly is False, the database may be updated appropriately based on
    what the function sees. No changes are ever directly made to the nodes
    through nodemanager communication regardless of the setting of readonly.
    However, other scripts might take action based on database changes (e.g.
    released vessel will quickly be cleaned up by the backend daemon).
  <Returns>
    None
  """
    
  if not readonly:
    must_destroy_lockserver_handle = False
    
    if lockserver_handle is None:
      must_destroy_lockserver_handle = True
      lockserver_handle = lockserver.create_lockserver_handle()
      
    # We only lock the node when we may modify the database.
    lockserver.lock_node(lockserver_handle, node.node_identifier)
    
  # Be sure to release the node lock, if we are locking the node.
  try:
    # Get a fresh node record from the database. It might have changed before
    # we obtained the lock.
    node = maindb.get_node(node.node_identifier)
    
    # The code beyond this point would be a good candidate for splitting out
    # into a few smaller functions for readability.
    
    donation_list = maindb.get_donations_from_node(node)
    if len(donation_list) == 0:
      _report_node_problem(node, "The node has no corresponding donation records. " +
                           "Not marking node broken, though.")
    
    try:
      nodeinfo = nodemanager.get_node_info(node.last_known_ip, node.last_known_port)
    except NodemanagerCommunicationError:
      _record_node_communication_failure(readonly, node)
      _report_node_problem(node, "Can't communicate with node.")
      return
    
    try:
      nodekey_str = rsa.rsa_publickey_to_string(nodeinfo["nodekey"])
    except ValueError:
      _mark_node_broken(readonly, node)
      _report_node_problem(node, "Invalid nodekey: " + str(nodeinfo["nodekey"]))
      return
    
    # Check that the nodeid matches. If it doesn't, it probably means seattle
    # was reinstalled or there is a different system at that address now.
    if node.node_identifier != nodekey_str:
      _mark_node_inactive(readonly, node)
      _report_node_problem(node, "Wrong node identifier, the node reports: " + str(nodeinfo["nodekey"]))
      # Not much more worth checking in this case.
      return
    
    
    # Check that the database thinks it knows the extra vessel name.
    if node.extra_vessel_name == "":
      _mark_node_broken(readonly, node)
      _report_node_problem(node, "No extra_vessel_name in the database.")
      # Not much more worth checking in this case.
      return
    
    # Check that a vessel by the name of extra_vessel_name exists on the node.
    if node.extra_vessel_name not in nodeinfo["vessels"]:
      _mark_node_broken(readonly, node)
      _report_node_problem(node, "The extra_vessel_name in the database is a vessel name that doesn't exist on the node.")
      # Not much more worth checking in this case.
      return
    
    extravesselinfo = nodeinfo["vessels"][node.extra_vessel_name]
        
    vessels_in_db = maindb.get_vessels_on_node(node)
  
    if len(extravesselinfo["userkeys"]) != 1:
      _mark_node_broken(readonly, node)
      _report_node_problem(node, "The extra vessel '" + node.extra_vessel_name + 
                          "' doesn't have 1 user key, it has " + 
                          str(len(extravesselinfo["userkeys"])))
  
    else:    
      # Figure out which state the node is in according to the state key.
      recognized_state_name = ""
    
      for statename in statekeys:
        if statekeys[statename] == extravesselinfo["userkeys"][0]:
          recognized_state_name = statename
    
      if not recognized_state_name:
        _mark_node_broken(readonly, node)
        _report_node_problem(node, "The extra vessel '" + node.extra_vessel_name + 
                            "' doesn't have a recognized user/state key")
    
      if len(vessels_in_db) == 0:
        if recognized_state_name == "onepercentmanyevents" or recognized_state_name == "twopercent":
          # We don't mark it as broken because it may be in transition by a
          # transition script away from onepercentmanyevents. If the vessels
          # in the db have been deleted first but the state key hasn't been
          # changed yet, we might hit this. Also, it's not so bad to have it
          # not be marked as broken when it's like this, as it has no vessels
          # we know about, anyways, so we're not going to be giving questionable
          # resources to users because of it.
          _report_node_problem(node, "The node is in the " + recognized_state_name + " state " + 
                              "but we don't have any vessels for it in the database.")
      else:
        if recognized_state_name != "onepercentmanyevents" and recognized_state_name != "twopercent":
          # We don't mark it as broken because it may be in transition by a
          # transition script. Also, we may have other states in the future
          # besides onepercentmanyevents that have vessels. We don't want
          # to make all of those nodes inactive if it's just an issue of
          # someone forgot to update this script.
          _report_node_problem(node, "The node is in the '" + recognized_state_name + 
                              "' state but we have vessels for it in the database.")
      
    known_vessel_names = []
    for vessel in vessels_in_db:
      known_vessel_names.append(vessel.name)
  
    # Look for vessels on the node with our node ownerkey which aren't in our database.
    for actualvesselname in nodeinfo["vessels"]:
  
      vessel_ownerkey = nodeinfo["vessels"][actualvesselname]["ownerkey"]
      
      try:
        vessel_ownerkey_str = rsa.rsa_publickey_to_string(vessel_ownerkey)
      except ValueError:
        # At this point we aren't sure it's our node, but let's assume that if
        # there's an invalid key then the node is broken, period.
        _mark_node_broken(readonly, node)
        _report_node_problem(node, "Invalid vessel ownerkey: " + str(vessel_ownerkey))
        return
      
      if vessel_ownerkey_str == node.owner_pubkey:
        if actualvesselname not in known_vessel_names and actualvesselname != node.extra_vessel_name:
          _mark_node_broken(readonly, node)
          _report_node_problem(node, "The vessel '" + actualvesselname + "' exists on the node " + 
                              "with the ownerkey for the node, but it's not in our vessels table.")
  
    # Do some checking on each vessel we have in our database.
    for vessel in vessels_in_db:
      
      # Check that the vessel in our database actually exists on the node.
      if vessel.name not in nodeinfo["vessels"]:
        _mark_node_broken(readonly, node)
        _report_node_problem(node, "The vessel '" + vessel.name + "' in our db doesn't exist on the node.")
        continue
  
      vesselinfo = nodeinfo["vessels"][vessel.name]
  
      try:
        vessel_ownerkey_str = rsa.rsa_publickey_to_string(vesselinfo["ownerkey"])
      except ValueError:
        _mark_node_broken(readonly, node)
        _report_node_problem(node, "Invalid vessel ownerkey on a vessel in our db: " + str(vessel_ownerkey))
        return
  
      # Check that the owner key for the vessel is what we have for the node's owner key in our database.
      if node.owner_pubkey != vessel_ownerkey_str:
        _mark_node_broken(readonly, node)
        _report_node_problem(node, "The vessel '" + vessel.name + "' doesn't have the ownerkey we use for the node.")
      
      if not vesselinfo["advertise"]:
        _mark_node_broken(readonly, node)
        _report_node_problem(node, "The vessel '" + vessel.name + "' isn't advertising.")
      
      # We're only concerned with non-dirty vessels as the backend daemon
      # should be working on cleaning up dirty vessels.
      if not vessel.is_dirty:
        # Check that the user keys that have access are the ones that should have access.
        users_with_access = maindb.get_users_with_access_to_vessel(vessel)
        
        if len(users_with_access) != len(vesselinfo["userkeys"]):
          _release_vessel(readonly, vessel)
          _report_node_problem(node, "The vessel '" + vessel.name + "' reports " + 
                              str(len(vesselinfo["userkeys"])) + " user keys, but we expected " + str(len(users_with_access)))
          
        for user in users_with_access:
          if rsa.rsa_string_to_publickey(user.user_pubkey) not in vesselinfo["userkeys"]:
            _release_vessel(readonly, vessel)
            _report_node_problem(node, "The vessel '" + vessel.name + "' doesn't have the userkey for user " + user.username + ".")

  finally:
    # We didn't do any locking if this readonly was True.
    if not readonly:
      
      # Release the lock
      lockserver.unlock_node(lockserver_handle, node.node_identifier)
      
      # Destroy the lockserver handle if we created it ourselves.
      if must_destroy_lockserver_handle:
        lockserver.destroy_lockserver_handle(lockserver_handle)
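
# A usage sketch for check_node(): iterate the active nodes read-only,
# sharing one lockserver handle across calls if we ever run in read-write
# mode. The accessor maindb.get_active_nodes() is an assumption made for
# illustration.
def check_all_active_nodes(readonly=True):
  lockserver_handle = None
  if not readonly:
    lockserver_handle = lockserver.create_lockserver_handle()
  try:
    for node in maindb.get_active_nodes():  # assumed accessor name
      check_node(node, readonly=readonly, lockserver_handle=lockserver_handle)
  finally:
    if lockserver_handle is not None:
      lockserver.destroy_lockserver_handle(lockserver_handle)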