def initialize_keys(keybitsize, nodemanager_directory="."):
  # nodemanager_directory: The directory in which the nodeman.cfg file is
  #                        located.

  # initialize my configuration file.   This involves a few variables:
  #    pollfrequency --  the amount of time to sleep after a check when "busy
  #                      waiting".   This trades CPU load for responsiveness.
  #    ports         --  the ports the node manager could listen on.
  #    publickey     --  the public key used to identify the node...
  #    privatekey    --  the corresponding private key for the node...
  #
  # Only the public key and private key are set here...
  configuration = persist.restore_object('nodeman.cfg')

  curdir = os.getcwd()
  os.chdir(nodemanager_directory)


  keys = rsa.rsa_gen_pubpriv_keys(keybitsize)
  configuration['publickey'] = keys[0]
  configuration['privatekey'] = keys[1]

  persist.commit_object(configuration, "nodeman.cfg")


  os.chdir(curdir)
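
Every example on this page revolves around the persist module's restore_object and commit_object calls. The real module is not shown here; the following is only a minimal sketch, assuming pickle-based serialization and a rename-into-place commit, which may differ from the actual Seattle implementation.

import os
import pickle

def restore_object(filename):
  # Load a previously committed object from disk.
  fo = open(filename, "rb")
  try:
    return pickle.load(fo)
  finally:
    fo.close()

def commit_object(obj, filename):
  # Write to a temporary file first, then rename it into place, so a crash
  # mid-write cannot leave a truncated nodeman.cfg behind (assumed detail).
  tempname = filename + ".tmp"
  fo = open(tempname, "wb")
  try:
    pickle.dump(obj, fo)
  finally:
    fo.close()
  os.rename(tempname, filename)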
Example #2
def inject(key, value, cfile="nodeman.cfg"):
  configuration = persist.restore_object(cfile)

  # Inject the dictionary
  configuration[key] = value

  persist.commit_object(configuration, cfile)
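
For illustration only, inject could be used to flip a single configuration flag; the key name below is made up, not one the node manager defines.

inject('example_flag', True)
# Reading the file back should now show the flag.
assert persist.restore_object("nodeman.cfg")['example_flag']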
def initialize_keys(keybitsize,nodemanager_directory="."):
  # nodemanager_directory: The directory in which the nodeman.cfg file is
  #                        located.

  # initialize my configuration file.   This involves a few variables:
  #    pollfrequency --  the amount of time to sleep after a check when "busy
  #                      waiting".   This trades CPU load for responsiveness.
  #    ports         --  the ports the node manager could listen on.
  #    publickey     --  the public key used to identify the node...
  #    privatekey    --  the corresponding private key for the node...
  #
  # Only the public key and private key are set here...
  configuration = persist.restore_object('nodeman.cfg')

  curdir = os.getcwd()
  os.chdir(nodemanager_directory)


  keys = rsa_gen_pubpriv_keys(keybitsize)
  configuration['publickey'] = keys[0]
  configuration['privatekey'] = keys[1]

  persist.commit_object(configuration,"nodeman.cfg")


  os.chdir(curdir)
Example #4
def main():

  global configuration

  if not FOREGROUND:
    # Background ourselves.
    daemon.daemonize()

  # ensure that only one instance is running at a time...
  gotlock = runonce.getprocesslock("seattlenodemanager")
  if gotlock == True:
    # I got the lock.   All is well...
    pass
  else:
    if gotlock:
      servicelogger.log("[ERROR]:Another node manager process (pid: " + str(gotlock) + 
          ") is running")
    else:
      servicelogger.log("[ERROR]:Another node manager process is running")
    return

  
  # I'll grab the necessary information first...
  servicelogger.log("[INFO]:Loading config")
  # BUG: Do this better?   Is this the right way to engineer this?
  configuration = persist.restore_object("nodeman.cfg")
  
  # Armon: initialize the network restrictions
  initialize_ip_interface_restrictions(configuration)
  
  
  
  # ZACK BOKA: For Linux and Darwin systems, check to make sure that the new
  #            seattle crontab entry has been installed in the crontab.
  #            Do this here because the "nodeman.cfg" needs to have been read
  #            into configuration via the persist module.
  if nonportable.ostype == 'Linux' or nonportable.ostype == 'Darwin':
    if 'crontab_updated_for_2009_installer' not in configuration or \
          configuration['crontab_updated_for_2009_installer'] == False:
      try:
        import update_crontab_entry
        modified_crontab_entry = \
            update_crontab_entry.modify_seattle_crontab_entry()
        # If updating the seattle crontab entry succeeded, then update the
        # 'crontab_updated_for_2009_installer' so the nodemanager no longer
        # tries to update the crontab entry when it starts up.
        if modified_crontab_entry:
          configuration['crontab_updated_for_2009_installer'] = True
          persist.commit_object(configuration,"nodeman.cfg")

      except Exception,e:
        exception_traceback_string = traceback.format_exc()
        servicelogger.log("[ERROR]: The following error occured when " \
                            + "modifying the crontab for the new 2009 " \
                            + "seattle crontab entry: " \
                            + exception_traceback_string)
Example #5
def initialize(name, pubkey, version):
    # Need to set the node name, etc. here...
    global nodename
    global nodepubkey
    global nodeversion
    global vesseldict

    nodename = name
    nodepubkey = pubkey

    nodeversion = version

    # load the vessel from disk
    vesseldict = persist.restore_object('vesseldict')

    return vesseldict
def get_servicevessel(maindirectory = '.', readattempts = 3):
  """
  <Purpose>
    Get the service vessel directory using vesseldict. 
    
  <Arguments>
    maindirectory:   This is the directory where repy.py, vesseldict, etc.
                     are stored.   The default is the current directory.

    readattempts:    The number of times to attempt to read vesseldict.
                     Default is 3.
             
  <Exceptions>
    TypeError if maindirectory is not a string
    Exception if reading vesseldict fails (previously received OSError
      from persist.py)
        
  <Side Effects>
    None
    
  <Returns>
    The service vessel.  If there is more than one,
    return the "first one" in the vesseldict (note this is a dictionary and
    so the order will be consistent but with little meaning).  If there is no 
    service vessel, return '.'
  """

  global servicevessel
  
  # this is the public key for seattle
  ownerkey = "22599311712094481841033180665237806588790054310631222126405381271924089573908627143292516781530652411806621379822579071415593657088637116149593337977245852950266439908269276789889378874571884748852746045643368058107460021117918657542413076791486130091963112612854591789518690856746757312472362332259277422867 12178066700672820207562107598028055819349361776558374610887354870455226150556699526375464863913750313427968362621410763996856543211502978012978982095721782038963923296750730921093699612004441897097001474531375768746287550135361393961995082362503104883364653410631228896653666456463100850609343988203007196015297634940347643303507210312220744678194150286966282701307645064974676316167089003178325518359863344277814551559197474590483044733574329925947570794508677779986459413166439000241765225023677767754555282196241915500996842713511830954353475439209109249856644278745081047029879999022462230957427158692886317487753201883260626152112524674984510719269715422340038620826684431748131325669940064404757120601727362881317222699393408097596981355810257955915922792648825991943804005848347665699744316223963851263851853483335699321871483966176480839293125413057603561724598227617736944260269994111610286827287926594015501020767105358832476708899657514473423153377514660641699383445065369199724043380072146246537039577390659243640710339329506620575034175016766639538091937167987100329247642670588246573895990251211721839517713790413170646177246216366029853604031421932123167115444834908424556992662935981166395451031277981021820123445253"
  ownerinfo = ""  

  # A TypeError should happen if maindirectory is of the wrong type
  vesseldictlocation = os.path.join(maindirectory, "vesseldict")

  vesseldictloaded = False
  for dontcare in range(0, readattempts):
    try:
      vesseldict = persist.restore_object(vesseldictlocation)
    except ValueError, e:
      # this means that the vessel doesn't exist.   Let's return the current
      # directory...
      return '.'
    except OSError:
      continue
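
The retry loop above (re-read vesseldict up to readattempts times, treating a ValueError as "no vessel") could be wrapped in a small helper. A sketch that assumes only the persist API shown on this page; restore_object_with_retries is a made-up name:

import time

def restore_object_with_retries(location, attempts=3, delay=0.0):
  # Retry persist.restore_object on transient OSErrors, e.g. while another
  # process is rewriting the file.  Re-raises after the final attempt.
  for attempt in range(attempts):
    try:
      return persist.restore_object(location)
    except OSError:
      if attempt == attempts - 1:
        raise
      if delay:
        time.sleep(delay)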
Example #7
def initialize(name, pubkey, version):
  # Need to set the node name, etc. here...
  global nodename
  global nodepubkey
  global nodeversion
  global vesseldict
  

  nodename = name
  nodepubkey = pubkey

  nodeversion = version 

  # load the vessel from disk
  vesseldict = persist.restore_object('vesseldict')

  return vesseldict
Example #8
def get_servicevessel(maindirectory='.', readattempts=3):
    """
  <Purpose>
    Get the service vessel directory using vesseldict. 
    
  <Arguments>
    maindirectory:   This is the directory where repy.py, vesseldict, etc.
                     are stored.   The default is the current directory.

    readattempts:    The number of times to attempt to read vesseldict.
                     Default is 3.
             
  <Exceptions>
    TypeError if maindirectory is not a string
    Exception if reading vesseldict fails (previously received OSError
      from persist.py)
        
  <Side Effects>
    None
    
  <Returns>
    The service vessel.  If there is more than one,
    return the "first one" in the vesseldict (note this is a dictionary and
    so the order will be consistent but with little meaning).  If there is no 
    service vessel, return '.'
  """

    global servicevessel

    # this is the public key for seattle
    ownerkey = "22599311712094481841033180665237806588790054310631222126405381271924089573908627143292516781530652411806621379822579071415593657088637116149593337977245852950266439908269276789889378874571884748852746045643368058107460021117918657542413076791486130091963112612854591789518690856746757312472362332259277422867 12178066700672820207562107598028055819349361776558374610887354870455226150556699526375464863913750313427968362621410763996856543211502978012978982095721782038963923296750730921093699612004441897097001474531375768746287550135361393961995082362503104883364653410631228896653666456463100850609343988203007196015297634940347643303507210312220744678194150286966282701307645064974676316167089003178325518359863344277814551559197474590483044733574329925947570794508677779986459413166439000241765225023677767754555282196241915500996842713511830954353475439209109249856644278745081047029879999022462230957427158692886317487753201883260626152112524674984510719269715422340038620826684431748131325669940064404757120601727362881317222699393408097596981355810257955915922792648825991943804005848347665699744316223963851263851853483335699321871483966176480839293125413057603561724598227617736944260269994111610286827287926594015501020767105358832476708899657514473423153377514660641699383445065369199724043380072146246537039577390659243640710339329506620575034175016766639538091937167987100329247642670588246573895990251211721839517713790413170646177246216366029853604031421932123167115444834908424556992662935981166395451031277981021820123445253"
    ownerinfo = ""

    # A TypeError should happen if maindirectory is of the wrong type
    vesseldictlocation = os.path.join(maindirectory, "vesseldict")

    vesseldictloaded = False
    for dontcare in range(0, readattempts):
        try:
            vesseldict = persist.restore_object(vesseldictlocation)
        except ValueError, e:
            # this means that the vessel doesn't exist.   Let's return the current
            # directory...
            return '.'
        except OSError:
            continue
def test_launch(arguments):
  # Setup a clean slate
  restore_copy()

  # Create the cmdline option
  command = "python seattleinstaller.py --onlynetwork "+arguments

  # Call subprocess
  proc = subprocess.Popen(command,shell=True)

  # Wait for the process to finish
  proc.wait()

  # Get the configuration
  config = persist.restore_object("nodeman.cfg")
 
  # Return the config
  return config
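
A hedged usage sketch: the argument string and the printed key list below are placeholders, since the real tests decide what to pass and what to assert.

config = test_launch("")
print("nodeman.cfg keys after the installer run: " + str(sorted(config.keys())))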
Example #10
def test_launch(arguments):
    # Setup a clean slate
    restore_copy()

    # Create the cmdline option
    command = "python seattleinstaller.py --onlynetwork " + arguments

    # Call subprocess
    proc = subprocess.Popen(command, shell=True)

    # Wait for the process to finish
    proc.wait()

    # Get the configuration
    config = persist.restore_object("nodeman.cfg")

    # Return the config
    return config
Example #11
def main():
    global configuration

    if not FOREGROUND:
        # Background ourselves.
        daemon.daemonize()

    # Check if we are running in testmode.
    if TEST_NM:
        nodemanager_pid = os.getpid()
        servicelogger.log(
            "[INFO]: Running nodemanager in test mode on port 1224, " +
            "pid %s." % str(nodemanager_pid))
        nodeman_pid_file = open(os.path.join(os.getcwd(), 'nodemanager.pid'),
                                'w')

        # Write out the pid of the nodemanager process that we started to a file.
        # This is only done if the nodemanager was started in test mode.
        try:
            nodeman_pid_file.write(str(nodemanager_pid))
        finally:
            nodeman_pid_file.close()

    else:
        # ensure that only one instance is running at a time...
        gotlock = runonce.getprocesslock("seattlenodemanager")

        if gotlock == True:
            # I got the lock.   All is well...
            pass
        else:
            if gotlock:
                servicelogger.log(
                    "[ERROR]:Another node manager process (pid: " +
                    str(gotlock) + ") is running")
            else:
                servicelogger.log(
                    "[ERROR]:Another node manager process is running")
            return

    servicelogger.log('[INFO]: This is Seattle release "' + version + '"')

    # Feature add for #1031: Log information about the system in the nm log...
    servicelogger.log('[INFO]:platform.python_version(): "' +
                      str(platform.python_version()) + '"')
    servicelogger.log('[INFO]:platform.platform(): "' +
                      str(platform.platform()) + '"')

    # uname on Android only yields 'Linux', let's be more specific.
    try:
        import android
        servicelogger.log('[INFO]:platform.uname(): Android / "' +
                          str(platform.uname()) + '"')
    except ImportError:
        servicelogger.log('[INFO]:platform.uname(): "' +
                          str(platform.uname()) + '"')

    # I'll grab the necessary information first...
    servicelogger.log("[INFO]:Loading config")
    # BUG: Do this better?   Is this the right way to engineer this?
    configuration = persist.restore_object("nodeman.cfg")

    # Armon: initialize the network restrictions
    initialize_ip_interface_restrictions(configuration)

    # ZACK BOKA: For Linux and Darwin systems, check to make sure that the new
    #            seattle crontab entry has been installed in the crontab.
    #            Do this here because the "nodeman.cfg" needs to have been read
    #            into configuration via the persist module.
    if nonportable.ostype == 'Linux' or nonportable.ostype == 'Darwin':
        if 'crontab_updated_for_2009_installer' not in configuration or \
              configuration['crontab_updated_for_2009_installer'] == False:
            try:
                # crontab may not exist on Android, therefore let's not check
                # if we are running on Android. See #1302 and #1254.
                try:
                    import android
                except ImportError:
                    import update_crontab_entry
                    modified_crontab_entry = \
                        update_crontab_entry.modify_seattle_crontab_entry()
                    # If updating the seattle crontab entry succeeded, then update the
                    # 'crontab_updated_for_2009_installer' so the nodemanager no longer
                    # tries to update the crontab entry when it starts up.
                    if modified_crontab_entry:
                        configuration[
                            'crontab_updated_for_2009_installer'] = True
                        persist.commit_object(configuration, "nodeman.cfg")

            except Exception, e:
                exception_traceback_string = traceback.format_exc()
                servicelogger.log("[ERROR]: The following error occured when " \
                                    + "modifying the crontab for the new 2009 " \
                                    + "seattle crontab entry: " \
                                    + exception_traceback_string)
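
The crontab check above is one instance of a read-modify-commit pattern that recurs throughout these examples. A generic sketch; set_config_flag_once is a made-up helper, not part of the node manager:

def set_config_flag_once(key, value, cfile="nodeman.cfg"):
  # Set a configuration flag only if it is absent or falsy, then commit.
  # Returns True if the file was rewritten.
  configuration = persist.restore_object(cfile)
  if not configuration.get(key):
    configuration[key] = value
    persist.commit_object(configuration, cfile)
    return True
  return False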
Example #12
def main(vesselcreationlist, tenpercentdict, targetdirectory):
    """
  <Purpose>
    To take the vesselcreationlist (list containing owner and user info)
    and generate the initial resource files and directories. 
    
    Working Definition: 
      percentage - in this method we are dealing with how to split up the 
      donated resources (this could be 10% of the system's available resources, 
      or it could be a user defined percentage but regardless we only 
      consider the portion donated). So any time we refer to percentages 
      in this method we are talking about the percentage of the donated 
      resources. This applies to the tenpercentdict that is passed as an
      argument (it is only ten percent of the donated resources).
    
    This method does not special case the seattle vessel, it only creates
    vessels based off what is in the vesselcreationlist list (the original
    writecustominstaller handled the seattle vessel separately).
    
  <Arguments>
    vesselcreationlist: a list that has the following format:
    [(vessel percent, ownerpubkeyfilename, list of user pubkeyfilenames),
      (vessel percent, ownerpubkeyfilename, list of user pubkeyfilenames),...]
      It contains the information for the initial vessels of the node.
     
    targetdirectory: a string containing the directory where the 
      restrictions files and vesseldict will be placed.
     
  <Exceptions>
    ValueError: if the vessel percent (from element 0 of a tuple in the 
      vesselcreationlist list) is not 10,20,30, ... , 80, 90, 100
      A ValueError will also be raised if the total percent (of all
      the vessel percents in the vesselcreationlist list) does not add
      up to the integer 100

    IOError: if vessel.restrictions is not found, this must be included
      ahead of time in the target directory.
  
    OSError, WindowsError: if os.mkdir is unable to create the vessel
      directories.
      
  <Side Effects>
    Creates the vesseldict using persist.commit_object.
    Creates resource files for each vessel with names of the form
      resource.v1, resource.v2, ...
    Creates a directory for each vessel with a name of the form v1, v2, ...
      
  <Return>
    None
  
  <Notes>   
    The vesselcreationlist is a list that has the following format:
    [(vessel percent, ownerpubkeyfilename, list of user pubkeyfilenames),
     (vessel percent, ownerpubkeyfilename, list of user pubkeyfilenames), ...]
  
    So it might look like:
      [(10, 'joe.publickey', []),
       (50, 'jim.publickey', ['alice.publickey', 'bob.publickey']), 
       (40, 'alice.publickey', []) ]
  
    This would indicate v1 is owned by joe and gets 10 percent, v2 is owned
    by jim has users alice and bob and gets 50 percent, v3 is owned by
    alice and gets 40 percent.   I'll track the seattle information and
    will write it independently.
  
  <Notes>
    The structure of this method can be described in 3 distinct sections.
    1) The vesseldict is constructed from the information in vesselcreationlist,
       it contains the public keys and other vessel information.
    2) The resource files are written to files for all the vessels
    3) Directories are created for all the vessels.
  """

    # Check to ensure that the vessel information is in the correct format.
    # Each vessel percent must be an integer multiple of ten from (0,100]
    # and the total must be 100. This is checked here instead of when the
    # list is originally constructed from the vesselinfo file so that only
    # this method must be changed if the granularity of the possible vessel
    # percentages is changed.
    total = 0

    for item in vesselcreationlist:
        if item[0] not in range(10, 101, 10):
            raise ValueError, "Invalid vessel percent '" + str(item[0]) + "'"
        total = total + item[0]

    if total != 100:
        raise ValueError, "Total of vessel percentage is '" + str(
            total) + "', not 100."

    vesseldict = {}
    vesselnumber = 1
    # time to work on the vessel dictionary...
    for item in vesselcreationlist:

        vesselname = 'v' + str(vesselnumber)

        vesseldict['v' + str(vesselnumber)] = {
            'userkeys': item[2],
            'ownerkey': item[1],
            'oldmetadata': None,
            'stopfilename': vesselname + '.stop',
            'logfilename': vesselname + '.log',
            'statusfilename': vesselname + '.status',
            'resourcefilename': 'resource.' + vesselname,
            'advertise': True,
            'ownerinformation': '',
            'status': 'Fresh'
        }
        vesselnumber = vesselnumber + 1

    persist.commit_object(vesseldict, targetdirectory + "/vesseldict")

    # I'm going to do the resources / restrictions now...
    onetenth = tenpercentdict.copy()

    # These are the restrictions that apply to all vessels, they are
    # not a resource we measure.
    restrictionsfo = file('vessel.restrictions')
    restrictionsstring = restrictionsfo.read()
    restrictionsfo.close()

    # I'll use this to figure out which ports to assign
    usedpercent = 0
    vesselnumber = 1

    for item in vesselcreationlist:

        # the percentcount variable is slightly confusing: up until now we have talked
        # about vessels as having 10 or 20 or ... percent of the donated resources.
        # We restrict vessels to being a multiple of ten so that we do not have to
        # cut down the donated resources too far (this is an attempt to keep things
        # simple and avoid getting resources at zero).
        # percentcount should be an integer between 0 and 10
        percentcount = item[0] / 10
        # make a resource file of the right size...
        size = percentcount
        thisresourcedata = tenpercentdict.copy()
        while size > 1:
            thisresourcedata = nmresourcemath.add(thisresourcedata, onetenth)
            size = size - 1

        # I need the ports...
        startpercent = usedpercent
        endpercent = usedpercent + percentcount
        # a yucky way of getting the ports.   Should do 63100-63109 for the first,
        # 63110-63119 for the second, etc.
        thisresourcedata['messport'] = set(
            range(63100 + 10 * startpercent, 63100 + 10 * endpercent))
        thisresourcedata['connport'] = set(
            range(63100 + 10 * startpercent, 63100 + 10 * endpercent))

        nmresourcemath.write_resource_dict(
            thisresourcedata,
            targetdirectory + "/resource.v" + str(vesselnumber))

        # append the restrictions data.
        restrictionsfo = file(
            targetdirectory + '/resource.v' + str(vesselnumber), "a")
        restrictionsfo.write(restrictionsstring)
        restrictionsfo.close()

        # increment the vesselnumber and used percent
        vesselnumber = vesselnumber + 1
        usedpercent = usedpercent + percentcount

    # Get the directory, if any, that is used for security layers.
    configuration = persist.restore_object("nodeman.cfg")
    repy_prepend_dir = None
    if 'repy_prepend_dir' in configuration:
        repy_prepend_dir = configuration['repy_prepend_dir']

    # Get the list of files in the security layer directory
    repy_prepend_files = []
    if repy_prepend_dir is not None:
        repy_prepend_files = os.listdir(repy_prepend_dir)

    # make the directories...
    for num in range(len(vesselcreationlist)):
        vesselname = 'v' + str(num + 1)
        try:
            WindowsError

        except NameError:  # not on windows...
            # make the vessel dirs...
            try:
                os.mkdir(targetdirectory + "/" + vesselname)
            except OSError, e:
                if e[0] == 17:
                    # directory exists
                    pass
                else:
                    raise

        else:  # on Windows...

            # make the vessel dirs...
            try:
                os.mkdir(targetdirectory + "/" + vesselname)
            except (OSError, WindowsError), e:
                if e[0] == 17 or e[0] == 183:
                    # directory exists
                    pass
                else:
                    raise
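
Using the layout described in the docstring, an invocation might look like the sketch below. The tenpercentdict contents are placeholders (real resource dictionaries carry many more entries), and the call still requires vessel.restrictions and the nmresourcemath module to be available.

sample_vesselcreationlist = [
    (10, 'joe.publickey', []),
    (50, 'jim.publickey', ['alice.publickey', 'bob.publickey']),
    (40, 'alice.publickey', []),
]
# Placeholder for one tenth of the donated resources.
sample_tenpercentdict = {'events': 5, 'messport': set(), 'connport': set()}

main(sample_vesselcreationlist, sample_tenpercentdict, ".")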
Example #13
def handle_threading_error():
  """
  <Purpose>
    Handles a repy node failing with ThreadErr. If repy is allowed to use
    more than 10% of the current threads, reduce the global thread count by 50%
    and stop all existing vessels

  <Arguments>
    None
  
  <Exceptions>
    None

  <Side Effects>
    May re-write all resource files and stop all vessels

  <Returns>
    None
  """
  # Make a log of this
  servicelogger.log("[ERROR]:A Repy vessel has exited with ThreadErr status. Checking to determine next step")

  # Get all the names of the vessels
  vesselnamelist = nmAPI.vesseldict.keys()
  
  # read in all of the resource files so that we can look at and possibly 
  # manipulate them.
  resourcedicts = {}
  for vesselname in vesselnamelist:
    resourcedicts[vesselname] = resourcemanipulation.read_resourcedict_from_file('resource.'+vesselname)
  
  # Get the number of threads Repy has allocated
  allowedthreadcount = 0
  for vesselname in vesselnamelist:
    allowedthreadcount = allowedthreadcount + resourcedicts[vesselname]['events']
  
  # Get the total number of system threads currently used 
  totalusedthreads = nonportable.os_api.get_system_thread_count()
  
  # Log this information
  servicelogger.log("[WARNING]:System Threads: "+str(totalusedthreads)+"  Repy Allocated Threads: "+str(allowedthreadcount))
  
  # Get the NM configuration
  configuration = persist.restore_object("nodeman.cfg")
  
  # Check if there is a threshold configuration,
  # otherwise add the default configuration
  if NOOP_CONFIG_KEY in configuration:
    threshold = configuration[NOOP_CONFIG_KEY]
  else:
    threshold = DEFAULT_NOOP_THRESHOLD
    configuration[NOOP_CONFIG_KEY] = threshold
    persist.commit_object(configuration, "nodeman.cfg")
  
  # Check if we are below the threshold, if so
  # then just return, this is a noop
  if allowedthreadcount < totalusedthreads * threshold:
    return
  
  servicelogger.log("[ERROR]:Reducing number of system threads!")



  #### We are above the threshold!   Let's cut everything by 1/2

  # First, update the resource files
  for vesselname in vesselnamelist:
    # cut the events by 1/2
    resourcedicts[vesselname]['events'] = resourcedicts[vesselname]['events'] / 2
    # write out the new resource files...
    resourcemanipulation.write_resourcedict_to_file(resourcedicts[vesselname], 'resource.'+vesselname)
  

  
  
  # Create the stop tuple, exit code 57 with an error message
  stoptuple = (57, "Fatal system-wide threading error! Stopping all vessels.")
  
  # Stop each vessel
  for vesselname in vesselnamelist:
    try:
      # Stop each vessel, using our stoptuple
      nmAPI.stopvessel(vesselname,stoptuple)
    except Exception, exp:
      # Forge on, regardless of errors
      servicelogger.log("[ERROR]:Failed to reset vessel (Handling ThreadErr). Exception: "+str(exp))
      servicelogger.log_last_exception()
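
A worked example of the no-op threshold check, with assumed numbers (DEFAULT_NOOP_THRESHOLD is taken to be 0.10, per the 10% mentioned in the docstring):

allowedthreadcount = 120   # sum of 'events' across all vessel resource files
totalusedthreads = 500     # system-wide thread count reported by the OS
threshold = 0.10           # assumed default threshold

# 120 < 500 * 0.10 is False, so this node is over the threshold: every
# vessel's 'events' allocation would be halved and all vessels stopped.
over_threshold = not (allowedthreadcount < totalusedthreads * threshold)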
shutil.copy("private_encasementlib.repy", "securitylayers")
shutil.copy("private_wrapper.repy", "securitylayers")
shutil.copy("private_hideprivate_layer.repy", "securitylayers")

initproc = subprocess.Popen([sys.executable, "nminit.py"])
initproc.wait()

# nminit doesn't automatically initialize the vessels with security layer files,
# so we should do this ourselves
for vesseldirectoryname in glob.glob('v[0-9]*'):
  if os.path.isdir(vesseldirectoryname):
    shutil.rmtree(vesseldirectoryname)
    shutil.copytree("securitylayers", vesseldirectoryname)

# add security layer configuration to nodeman.cfg
configuration = persist.restore_object("nodeman.cfg")
configuration['repy_prepend'] = ["private_encasementlib.repy", "private_hideprivate_layer.repy"]
configuration['repy_prepend_dir'] = "securitylayers"
persist.commit_object(configuration, "nodeman.cfg")

nmproc = subprocess.Popen([sys.executable, "nmmain.py", "--test-mode"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

# without closing stdin, stdout, stderr, nmmain.py won't execute on XP
nmproc.stdin.close()
nmproc.stdout.close()
nmproc.stderr.close()

try:
  # We'll want to kill this once we are told to do so... (sys.stdin.close() is
  # the signal)
  sys.stdin.read()
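
A minimal sanity check, not part of the original test script, that the security-layer keys committed above can be read back:

check = persist.restore_object("nodeman.cfg")
assert check['repy_prepend_dir'] == "securitylayers"
assert "private_encasementlib.repy" in check['repy_prepend']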
Example #15
def main():
  global configuration

  if not FOREGROUND:
    # Background ourselves.
    daemon.daemonize()


  # Check if we are running in testmode.
  if TEST_NM:
    nodemanager_pid = os.getpid()
    servicelogger.log("[INFO]: Running nodemanager in test mode on port 1224, "+
                      "pid %s." % str(nodemanager_pid))
    nodeman_pid_file = open(os.path.join(os.getcwd(), 'nodemanager.pid'), 'w')
    
    # Write out the pid of the nodemanager process that we started to a file.
    # This is only done if the nodemanager was started in test mode.
    try:
      nodeman_pid_file.write(str(nodemanager_pid))
    finally:
      nodeman_pid_file.close()

  else:
    # ensure that only one instance is running at a time...
    gotlock = runonce.getprocesslock("seattlenodemanager")

    if gotlock == True:
      # I got the lock.   All is well...
      pass
    else:
      if gotlock:
        servicelogger.log("[ERROR]:Another node manager process (pid: " + str(gotlock) + 
                        ") is running")
      else:
        servicelogger.log("[ERROR]:Another node manager process is running")
      return


  servicelogger.log('[INFO]: This is Seattle release "' + version + '"')

  # Feature add for #1031: Log information about the system in the nm log...
  servicelogger.log('[INFO]:platform.python_version(): "' + 
    str(platform.python_version())+'"')
  servicelogger.log('[INFO]:platform.platform(): "' + 
    str(platform.platform())+'"')

  # uname on Android only yields 'Linux', let's be more specific.
  try:
    import android
    servicelogger.log('[INFO]:platform.uname(): Android / "' + 
      str(platform.uname())+'"')
  except ImportError:
    servicelogger.log('[INFO]:platform.uname(): "'+str(platform.uname())+'"')

  # I'll grab the necessary information first...
  servicelogger.log("[INFO]:Loading config")
  # BUG: Do this better?   Is this the right way to engineer this?
  configuration = persist.restore_object("nodeman.cfg")

  # If Seattle is not installed, the nodemanager will have no vesseldict
  # and an incomplete config. Log this problem and exit.
  try:
    if configuration["seattle_installed"] is not True:
      servicelogger.log("[ERROR]:Seattle is not installed. Run the Seattle installer to create the required configuration files before starting the nodemanager. Exiting.")
      harshexit.harshexit(10)
  except KeyError:
    # There isn't even a "seattle_installed" entry in this dict!?
    servicelogger.log("[ERROR]:The nodemanager configuration, nodeman.cfg, is corrupt. Exiting.")
    harshexit.harshexit(11)
  
  
  # Armon: initialize the network restrictions
  initialize_ip_interface_restrictions(configuration)
  
  
  # Enable Affix and overload various Repy network API calls 
  # with Affix-enabled calls.
  # Use the node's publickey to generate a name for our node.
  mypubkey = rsa_publickey_to_string(configuration['publickey']).replace(" ", "")
  affix_stack_name = sha_hexhash(mypubkey)

  enable_affix('(CoordinationAffix)(MakeMeHearAffix)(NamingAndResolverAffix,' + 
      affix_stack_name + ')')

  # get the external IP address...
  myip = None
  while True:
    try:
      # Try to find our external IP.
      myip = emulcomm.getmyip()
    except Exception, e: # Replace with InternetConnectivityError ?
      # If we aren't connected to the internet, emulcomm.getmyip() raises this:
      if len(e.args) >= 1 and e.args[0] == "Cannot detect a connection to the Internet.":
        # So we try again.
        pass
      else:
        # It wasn't emulcomm.getmyip()'s exception. re-raise.
        raise
    else:
      # We succeeded in getting our external IP. Leave the loop.
      break
    time.sleep(0.1)
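
The connectivity loop above could be factored into a helper; a sketch assuming only emulcomm.getmyip() and the exact error string checked above (wait_for_external_ip is a made-up name):

def wait_for_external_ip(poll_interval=0.1):
  # Poll emulcomm.getmyip() until the node can detect an Internet connection.
  while True:
    try:
      return emulcomm.getmyip()
    except Exception, e:
      if len(e.args) >= 1 and e.args[0] == "Cannot detect a connection to the Internet.":
        time.sleep(poll_interval)
      else:
        raise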
def uninstall_Linux_and_Mac():
    """
  <Purpose>
    Remove the seattle entry from the crontab, and kill all seattle processes
    by using stop_all_seattle_processes.py
  <Arguments>
    None.
  <Exceptions>
    SeattleNotInstalledError if seattle had not been initially installed when
      the uninstaller was run.
    UnsupportedOSError from a child function if the OS running this script is
      not supported.
  <Side Effects>
    Removes the seattle entry from the crontab and stops seattle from running.
  <Returns>
    True if succeeded in uninstalling,
    False otherwise.
  """

    # Derek Cheng: Find out if this is a Nokia N800/900 Tablet, and if so runs a
    # separate uninstaller because there is no crontab on the tablets.
    if platform.machine().startswith('armv'):
        return uninstall_nokia()

    if platform.machine().startswith('mips'):
        return True

    # Find out if Seattle is installed (currently in the crontab), and remove if
    # so.
    crontab_contents_stdout = subprocess.Popen(["crontab", "-l"],
                                               stdout=subprocess.PIPE,
                                               stderr=subprocess.PIPE).stdout

    # Generate a temp file with the user's crontab minus our task.
    temp_crontab_file = tempfile.NamedTemporaryFile()

    seattle_crontab_entry_found = False
    for line in crontab_contents_stdout:
        if not seattleinstaller.get_starter_file_name() in line:
            temp_crontab_file.write(line)
        else:
            seattle_crontab_entry_found = True

    if not seattle_crontab_entry_found:
        temp_crontab_file.close()
        raise SeattleNotInstalledError("Seattle cannot be uninstalled because it " \
                                         + "is not currently installed.")

    # Replace the old crontab with the updated crontab contents.
    temp_crontab_file.flush()
    replace_crontab = subprocess.Popen(["crontab", temp_crontab_file.name],
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)
    replace_crontab.wait()
    temp_crontab_file.close()

    # Confirm that seattle was successfully removed from the crontab, and set
    # the 'crontab_updated_for_2009_installer' value in nodeman.cfg to False.
    modified_crontab_contents_stdout,modified_crontab_contents_stderr = \
        subprocess.Popen(["crontab","-l"],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE).communicate()

    if seattleinstaller.get_starter_file_name() \
          in modified_crontab_contents_stdout:
        return False
    else:
        # Stop all instances of seattle from running before returning.
        stop_all_seattle_processes.main()

        configuration = persist.restore_object("nodeman.cfg")
        configuration['crontab_updated_for_2009_installer'] = False
        persist.commit_object(configuration, "nodeman.cfg")

        return True
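
A hedged usage sketch; as in the uninstaller snippet later on this page, the caller is expected to catch SeattleNotInstalledError itself:

try:
  if uninstall_Linux_and_Mac():
    print("seattle entry removed from the crontab and processes stopped")
  else:
    print("the seattle starter entry is still present in the crontab")
except SeattleNotInstalledError:
  print("seattle does not appear to be installed")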
Example #17
def handle_threading_error(nmAPI):
    """
  <Purpose>
    Handles a repy node failing with ThreadErr. Reduces global thread count by 50%.
    Restarts all existing vessels

  <Arguments>
    nmAPI: the nmAPI module -- passed to the function to avoid import loops;
           see ticket #590 for more information about this.
  """
    # Make a log of this
    servicelogger.log(
        "[ERROR]:A Repy vessel has exited with ThreadErr status. Patching restrictions and reseting all vessels."
    )

    # Get the number of threads Repy has allocated
    allocatedThreads = get_allocated_threads()

    # Get the number of system threads currently in use
    systemThreads = nonportable.os_api.get_system_thread_count()

    # Log this information
    servicelogger.log(
        "[ERROR]:System Threads: " + str(systemThreads) + "  Repy Allocated Threads: " + str(allocatedThreads)
    )

    # Get the NM configuration
    configuration = persist.restore_object("nodeman.cfg")

    # Check if there is a threshold configuration,
    # otherwise add the default configuration
    if NOOP_CONFIG_KEY in configuration:
        threshold = configuration[NOOP_CONFIG_KEY]
    else:
        threshold = DEFAULT_NOOP_THRESHOLD
        configuration[NOOP_CONFIG_KEY] = threshold
        persist.commit_object(configuration, "nodeman.cfg")

    # Check if we are below the threshold, if so
    # then just return, this is a noop
    if allocatedThreads < systemThreads * threshold:
        return

    # We are continuing, so we are above the threshold!
    # First, update the restrictions
    update_restrictions()

    # Then, stop the vessels
    # Get all the vessels
    vessels = nmAPI.vesseldict.keys()

    # Create the stop tuple, exit code 57 with an error message
    stoptuple = (57, "Fatal system-wide threading error! Stopping all vessels.")

    # Stop each vessel
    for vessel in vessels:
        try:
            # Stop each vessel, using our stoptuple
            nmAPI.stopvessel(vessel, stoptuple)
        except Exception, exp:
            # Forge on, regardless of errors
            servicelogger.log("[ERROR]:Failed to reset vessel (Handling ThreadErr). Exception: " + str(exp))
            servicelogger.log_last_exception()
def main(vesselcreationlist, tenpercentdict, targetdirectory):
  """
  <Purpose>
    To take the vesselcreationlist (list containing owner and user info)
    and generate the initial resource files and directories. 
    
    Working Definition: 
      percentage - in this method we are dealing with how to split up the 
      donated resources (this could be 10% of the system's available resources, 
      or it could be a user defined percentage but regardless we only 
      consider the portion donated). So any time we refer to percentages 
      in this method we are talking about the percentage of the donated 
      resources. This applies to the tenpercentdict that is passed as an
      argument (it is only ten percent of the donated resources).
    
    This method does not special case the seattle vessel, it only creates
    vessels based off what is in the vesselcreationlist list (the original
    writecustominstaller handled the seattle vessel separately).
    
  <Arguments>
    vesselcreationlist: a list that has the following format:
    [(vessel percent, ownerpubkeyfilename, list of user pubkeyfilenames),
      (vessel percent, ownerpubkeyfilename, list of user pubkeyfilenames),...]
      It contains the information for the initial vessels of the node.
     
    targetdirectory: a string containing the directory where the 
      restrictions files and vesseldict will be placed.
     
  <Exceptions>
    ValueError: if the vessel percent (from element 0 of a tuple in the 
      vesselcreationlist list) is not 10,20,30, ... , 80, 90, 100
      A ValueError will also be raised if the total percent (of all
      the vessel percents in the vesselcreationlist list) does not add
      up to the integer 100

    IOError: if vessel.restrictions is not found, this must be included
      ahead of time in the target directory.
  
    OSError, WindowsError: if os.mkdir is unable to create the vessel
      directories.
      
  <Side Effects>
    Creates the vesseldict using persist.commit_object.
    Creates resource files for each vessel with names of the form
      resource.v1, resource.v2, ...
    Creates a directory for each vessel with a name of the form v1, v2, ...
      
  <Return>
    None
  
  <Notes>   
    The vesselcreationlist is a list that has the following format:
    [(vessel percent, ownerpubkeyfilename, list of user pubkeyfilenames),
     (vessel percent, ownerpubkeyfilename, list of user pubkeyfilenames), ...]
  
    So it might look like:
      [(10, 'joe.publickey', []),
       (50, 'jim.publickey', ['alice.publickey', 'bob.publickey']), 
       (40, 'alice.publickey', []) ]
  
    This would indicate v1 is owned by joe and gets 10 percent, v2 is owned
    by jim has users alice and bob and gets 50 percent, v3 is owned by
    alice and gets 40 percent.   I'll track the seattle information and
    will write it independently.
  
  <Notes>
    The structure of this method can be described in 3 distinct sections.
    1) The vesseldict is constructed from the information in vesselcreationlist,
       it contains the public keys and other vessel information.
    2) The resource files are written to files for all the vessels
    3) Directories are created for all the vessels.
  """

  # Check to ensure that the vessel information is in the correct format.
  # Each vessel percent must be an integer multiple of ten from (0,100]
  # and the total must be 100. This is checked here instead of when the
  # list is originally constructed from the vesselinfo file so that only
  # this method must be changed if the granularity of the possible vessel
  # percentages is changed.  
  total = 0
  
  for item in vesselcreationlist:
    if item[0] not in range(10, 101, 10):
      raise ValueError, "Invalid vessel percent '"+str(item[0])+"'"
    total = total + item[0]

  if total != 100:
    raise ValueError, "Total of vessel percentage is '"+str(total)+"', not 100."


 
  vesseldict = {}
  vesselnumber = 1
  # time to work on the vessel dictionary...
  for item in vesselcreationlist:

    vesselname = 'v'+str(vesselnumber)

    vesseldict['v'+str(vesselnumber)] = {'userkeys': item[2],
        'ownerkey': item[1],
        'oldmetadata': None,
        'stopfilename': vesselname+'.stop',
        'logfilename': vesselname+'.log',
        'statusfilename': vesselname+'.status',
        'resourcefilename': 'resource.'+vesselname,
        'advertise': True,
        'ownerinformation': '',
        'status': 'Fresh'}
    vesselnumber = vesselnumber + 1
    
  persist.commit_object(vesseldict,targetdirectory+"/vesseldict")


  # I'm going to do the resources / restrictions now...
  onetenth = tenpercentdict.copy()
  
  # These are the restrictions that apply to all vessels, they are
  # not a resource we measure.
  restrictionsfo = file('vessel.restrictions')
  restrictionsstring = restrictionsfo.read()
  restrictionsfo.close()

  # I'll use this to figure out which ports to assign
  usedpercent = 0
  vesselnumber = 1

  for item in vesselcreationlist:
    
    # the percentcount variable is slightly confusing: up until now we have talked
    # about vessels as having 10 or 20 or ... percent of the donated resources.
    # We restrict vessels to being a multiple of ten so that we do not have to
    # cut down the donated resources too far (this is an attempt to keep things
    # simple and avoid getting resources at zero).
    # percentcount should be an integer between 0 and 10
    percentcount = item[0] / 10
    # make a resource file of the right size...
    size = percentcount
    thisresourcedata = tenpercentdict.copy()
    while size > 1:
      thisresourcedata = nmresourcemath.add(thisresourcedata, onetenth)
      size = size - 1

    # I need the ports...
    startpercent = usedpercent
    endpercent = usedpercent + percentcount
    # a yucky way of getting the ports.   Should do 63100-63109 for the first,
    # 63110-63119 for the second, etc.
    thisresourcedata['messport'] = set(range(63100+10*startpercent, 63100+10*endpercent))
    thisresourcedata['connport'] = set(range(63100+10*startpercent, 63100+10*endpercent))
    
    
    nmresourcemath.write_resource_dict(thisresourcedata, targetdirectory+"/resource.v"+str(vesselnumber))
        
    # append the restrictions data.
    restrictionsfo = file(targetdirectory+'/resource.v'+str(vesselnumber),"a")
    restrictionsfo.write(restrictionsstring)
    restrictionsfo.close()

    # increment the vesselnumber and used percent
    vesselnumber = vesselnumber + 1
    usedpercent = usedpercent + percentcount


  # Get the directory, if any, that is used for security layers.
  configuration = persist.restore_object("nodeman.cfg")
  repy_prepend_dir = None
  if 'repy_prepend_dir' in configuration:
    repy_prepend_dir = configuration['repy_prepend_dir']

  # Get the list of files in the security layer directory
  repy_prepend_files = []
  if repy_prepend_dir is not None:
    repy_prepend_files = os.listdir(repy_prepend_dir)

  # make the directories...
  for num in range(len(vesselcreationlist)):
    vesselname = 'v'+str(num+1)
    try:
      WindowsError

    except NameError: # not on windows...
      # make the vessel dirs...
      try:
        os.mkdir(targetdirectory+"/"+vesselname)
      except OSError,e:
        if e[0] == 17:
          # directory exists
          pass
        else:
          raise

    else: # on Windows...

      # make the vessel dirs...
      try:
        os.mkdir(targetdirectory+"/"+vesselname)
      except (OSError,WindowsError),e:
        if e[0] == 17 or e[0] == 183:
          # directory exists
          pass
        else:
          raise
  successful_uninstall = False
  try:
    if OS == 'Linux' or OS == 'Darwin':
      successful_uninstall = uninstall_Linux_and_Mac()
    else:
      successful_uninstall = uninstall_Windows()
  except SeattleNotInstalledError,s:
    _output("Seattle was not detected as having been installed.")
    return



  # Print final output, and do final logging.
  if successful_uninstall:
    # Modify nodeman.cfg to note that seattle is no longer installed.
    configuration = persist.restore_object("nodeman.cfg")
    configuration['seattle_installed'] = False
    persist.commit_object(configuration,"nodeman.cfg")

    _output("Seattle has been successfully uninstalled.  It is now safe to " \
              + "remove all Seattle files and directories from your system.")
    servicelogger.log(time.strftime(" seattle completed uninstall on: " \
                                      + "%m-%d-%Y %H:%M:%S"))
  else:
    _output("Seattle could not be uninstalled for unknown reasons. Please " \
              + "contact the Seattle development team for assistance.")




if __name__ == "__main__":
Example #20
                            + exception_traceback_string)
  
  if LOCAL_MODE:
    servicelogger.log('[LOCAL]: Operating in local_mode.')
    nmrequesthandler.LOCAL_MODE = LOCAL_MODE
    fastsigneddata.LOCAL_MODE = LOCAL_MODE

    if 'local_mode' not in configuration:
      configuration['local_mode'] = True
      servicelogger.log('[LOCAL]: Adding local_mode to nodeman.cfg!')
    else:
      configuration['local_mode'] = True
      servicelogger.log('[LOCAL]: Setting local_mode to True in nodeman.cfg')
    persist.commit_object(configuration, "nodeman.cfg")

    tempVesselDict = persist.restore_object("vesseldict")
    for vesselname in tempVesselDict.keys()[:]:
      thisentry = tempVesselDict[vesselname]
      thisentry['local_mode'] = True
    persist.commit_object(tempVesselDict, "vesseldict")

  else:
    configuration['local_mode'] = False
    servicelogger.log('[LOCAL]: Setting local_mode to false...')
    persist.commit_object(configuration, "nodeman.cfg")

    tempVesselDict = persist.restore_object("vesseldict")
    for vesselname in tempVesselDict.keys()[:]:
      thisentry = tempVesselDict[vesselname]
      thisentry['local_mode'] = False
    persist.commit_object(tempVesselDict, "vesseldict")
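
The two branches above differ only in the boolean they write into nodeman.cfg and vesseldict; a sketch of the shared pattern (set_local_mode is a made-up helper name):

def set_local_mode(enabled):
  # Record local_mode in nodeman.cfg and mirror it into every vessel entry.
  configuration = persist.restore_object("nodeman.cfg")
  configuration['local_mode'] = enabled
  persist.commit_object(configuration, "nodeman.cfg")

  tempVesselDict = persist.restore_object("vesseldict")
  for vesselname in tempVesselDict.keys():
    tempVesselDict[vesselname]['local_mode'] = enabled
  persist.commit_object(tempVesselDict, "vesseldict")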
Example #21
def startvessel_ex(vesselname, prog_platform, argstring):
  
  # Convert the programming platform to lowercase to make
  # it case insensitive.
  prog_platform = prog_platform.lower()

  if vesselname not in vesseldict:
    raise BadRequest, "No such vessel"

  if vesseldict[vesselname]['status'] == 'Started':
    raise BadRequest("Vessel has already been started")

  if prog_platform not in prog_platform_dir.keys():
    raise BadRequest("Programming language platform is not supported.")

  # remove any prior stop file so that we can start
  if os.path.exists(vesseldict[vesselname]['stopfilename']):
    os.remove(vesseldict[vesselname]['stopfilename'])

  for char in argstring:
    if char not in allowedchars:
      raise BadRequest("Character '"+char+"' not allowed in arguments")
 
  # I'm going to capture the status and timestamp and then check to see if
  # the timestamp is updated...
  oldstatus, oldtimestamp = statusstorage.read_status(vesseldict[vesselname]['statusfilename'])

  # Armon: this is required to fetch the networkrestrictions information from the configuration
  configuration = persist.restore_object("nodeman.cfg")

  
  # Armon: Generate the IP/Iface preferences if they exist
  ip_iface_preference_flags = []
  ip_iface_preference_str = ""    # Needed for Win Mobile

  # Only add the flags if everything necessary exists
  if 'networkrestrictions' in configuration and 'repy_restricted' in configuration['networkrestrictions'] \
    and configuration['networkrestrictions']['repy_restricted'] and 'repy_user_preference' in configuration['networkrestrictions']:
      # Generate the preference list
      for (is_ip, value) in configuration['networkrestrictions']['repy_user_preference']:
        # Append the correct flag
        if is_ip:
          ip_iface_preference_flags.append("--ip")
          ip_iface_preference_str += "--ip "
        else:
          ip_iface_preference_flags.append("--iface")
          ip_iface_preference_str += "--iface "
          
        # Append the value
        ip_iface_preference_flags.append(value)
        ip_iface_preference_str += "'" + value + "' "
        
      # Check for the --nootherips flag
      if 'repy_nootherips' in configuration['networkrestrictions'] and configuration['networkrestrictions']['repy_nootherips']:
        # Append the flag
        ip_iface_preference_flags.append("--nootherips")
        ip_iface_preference_str += "--nootherips "
    
  # Find the location where the sandbox files are located. Location of repyV1, repyV2 etc.
  prog_platform_location = os.path.join(prog_platform_dir[prog_platform], "repy.py")
 
  # I use absolute paths so that repy can still find the files after it 
  # changes directories...
  
  # Conrad: switched this to sequence-style Popen invocation so that spaces
  # in files work. Switched it back to absolute paths.
  command = [sys.executable, prog_platform_location] + ip_iface_preference_flags + [
      "--logfile", os.path.abspath(vesseldict[vesselname]['logfilename']),
      "--stop",    os.path.abspath(vesseldict[vesselname]['stopfilename']),
      "--status",  os.path.abspath(vesseldict[vesselname]['statusfilename']),
      "--cwd",     os.path.abspath(vesselname),
      "--servicelog", 
      "--execinfo",
      os.path.abspath(vesseldict[vesselname]['resourcefilename'])] + argstring.split()

  portable_popen.Popen(command)


  starttime = nonportable.getruntime()

  # wait for 10 seconds for it to start (else return an error)
  while nonportable.getruntime()-starttime < 10:
    newstatus, newtimestamp = statusstorage.read_status(vesseldict[vesselname]['statusfilename'])
    # Great!   The timestamp was updated...   The new status is the result of 
    # our work.   Let's tell the user what happened...
    if newtimestamp != oldtimestamp and newstatus != None:
      break

    

    # sleep while busy waiting...
    time.sleep(.5)

  else:
    return "Did not start in a timely manner\nWarning"

  # We need to update the status in the table because the status thread might
  # not notice this before our next request... (else occasional failures on XP)
  nmstatusmonitor.update_status(vesseldict, vesselname, newstatus, newtimestamp)


  return newstatus+"\nSuccess"
Example #22
def startvessel_ex(vesselname, prog_platform, argstring):

    # Convert the programming platform to lowercase to make
    # it case insensitive.
    prog_platform = prog_platform.lower()

    if vesselname not in vesseldict:
        raise BadRequest, "No such vessel"

    if vesseldict[vesselname]['status'] == 'Started':
        raise BadRequest("Vessel has already been started")

    if prog_platform not in prog_platform_dir.keys():
        raise BadRequest("Programming language platform is not supported.")

    # remove any prior stop file so that we can start
    if os.path.exists(vesseldict[vesselname]['stopfilename']):
        os.remove(vesseldict[vesselname]['stopfilename'])

    for char in argstring:
        if char not in allowedchars:
            raise BadRequest("Character '" + char +
                             "' not allowed in arguments")

    # I'm going to capture the status and timestamp and then check to see if
    # the timestamp is updated...
    oldstatus, oldtimestamp = statusstorage.read_status(
        vesseldict[vesselname]['statusfilename'])

    # Armon: this is required to fetch the networkrestrictions information from the configuration
    configuration = persist.restore_object("nodeman.cfg")

    # Armon: Generate the IP/Iface preferences if they exist
    ip_iface_preference_flags = []
    ip_iface_preference_str = ""  # Needed for Win Mobile

    # Only add the flags if everything necessary exists
    if 'networkrestrictions' in configuration and 'repy_restricted' in configuration['networkrestrictions'] \
      and configuration['networkrestrictions']['repy_restricted'] and 'repy_user_preference' in configuration['networkrestrictions']:
        # Generate the preference list
        for (is_ip, value
             ) in configuration['networkrestrictions']['repy_user_preference']:
            # Append the correct flag
            if is_ip:
                ip_iface_preference_flags.append("--ip")
                ip_iface_preference_str += "--ip "
            else:
                ip_iface_preference_flags.append("--iface")
                ip_iface_preference_str += "--iface "

            # Append the value
            ip_iface_preference_flags.append(value)
            ip_iface_preference_str += "'" + value + "' "

        # Check for the --nootherips flag
        if 'repy_nootherips' in configuration[
                'networkrestrictions'] and configuration[
                    'networkrestrictions']['repy_nootherips']:
            # Append the flag
            ip_iface_preference_flags.append("--nootherips")
            ip_iface_preference_str += "--nootherips "

    # Find the location where the sandbox files are located. Location of repyV1, repyV2 etc.
    prog_platform_location = os.path.join(prog_platform_dir[prog_platform],
                                          "repy.py")

    # I use absolute paths so that repy can still find the files after it
    # changes directories...

    # Conrad: switched this to sequence-style Popen invocation so that spaces
    # in files work. Switched it back to absolute paths.
    command = [
        sys.executable, prog_platform_location
    ] + ip_iface_preference_flags + [
        "--logfile",
        os.path.abspath(vesseldict[vesselname]['logfilename']), "--stop",
        os.path.abspath(vesseldict[vesselname]['stopfilename']), "--status",
        os.path.abspath(vesseldict[vesselname]['statusfilename']), "--cwd",
        os.path.abspath(vesselname), "--servicelog", "--execinfo",
        os.path.abspath(vesseldict[vesselname]['resourcefilename'])
    ] + argstring.split()

    portable_popen.Popen(command)

    starttime = nonportable.getruntime()

    # wait for 10 seconds for it to start (else return an error)
    while nonportable.getruntime() - starttime < 10:
        newstatus, newtimestamp = statusstorage.read_status(
            vesseldict[vesselname]['statusfilename'])
        # Great!   The timestamp was updated...   The new status is the result of
        # our work.   Let's tell the user what happened...
        if newtimestamp != oldtimestamp and newstatus != None:
            break

        # sleep while busy waiting...
        time.sleep(.5)

    else:
        return "Did not start in a timely manner\nWarning"

    # We need to update the status in the table because the status thread might
    # not notice this before our next request... (else occasional failures on XP)
    nmstatusmonitor.update_status(vesseldict, vesselname, newstatus,
                                  newtimestamp)

    return newstatus + "\nSuccess"
def uninstall_Linux_and_Mac():
  """
  <Purpose>
    Remove the seattle entry from the crontab, and kill all seattle processes
    by using stop_all_seattle_processes.py
  <Arguments>
    None.
  <Exceptions>
    SeattleNotInstalledError if seattle had not been initially installed when
      the uninstaller was run.
    UnsupportedOSError from a child function if the OS running this script is
      not supported.
  <Side Effects>
    Removes the seattle entry from the crontab and stops seattle from running.
  <Returns>
    True if succeeded in uninstalling,
    False otherwise.
  """


  # Derek Cheng: Find out if this is a Nokia N800/900 tablet, and if so, run a
  # separate uninstaller because there is no crontab on the tablets.
  if platform.machine().startswith('armv'):
    return uninstall_nokia()

  if platform.machine().startswith('mips'):
    return True


  # Find out if Seattle is installed (currently in the crontab), and remove if
  # so.
  crontab_contents_stdout = subprocess.Popen(["crontab","-l"],
                                             stdout=subprocess.PIPE,
                                             stderr=subprocess.PIPE).stdout

  # Generate a temp file with the user's crontab minus our task.
  temp_crontab_file = tempfile.NamedTemporaryFile()
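  # NamedTemporaryFile is removed automatically when closed, so the filtered
  # crontab only needs to exist long enough for the "crontab <filename>" call
  # below to read it.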

  seattle_crontab_entry_found = False
  for line in crontab_contents_stdout:
    if seattleinstaller.get_starter_file_name() not in line:
      temp_crontab_file.write(line)
    else:
      seattle_crontab_entry_found = True


  if not seattle_crontab_entry_found:
    temp_crontab_file.close()
    raise SeattleNotInstalledError("Seattle cannot be uninstalled because it " \
                                     + "is not currently installed.")



  # Replace the old crontab with the updated crontab contents.
  temp_crontab_file.flush()
  replace_crontab = subprocess.Popen(["crontab",temp_crontab_file.name],
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
  replace_crontab.wait()
  temp_crontab_file.close()



  # Confirm that seattle was successfully removed from the crontab, and set
  # the 'crontab_updated_for_2009_installer' value in nodeman.cfg to False.
  modified_crontab_contents_stdout,modified_crontab_contents_stderr = \
      subprocess.Popen(["crontab","-l"],
                       stdout=subprocess.PIPE,
                       stderr=subprocess.PIPE).communicate()

  if seattleinstaller.get_starter_file_name() \
        in modified_crontab_contents_stdout:
    return False
  else:
    # Stop all instances of seattle from running before returning.
    stop_all_seattle_processes.main()

    configuration = persist.restore_object("nodeman.cfg")
    configuration['crontab_updated_for_2009_installer'] = False
    persist.commit_object(configuration,"nodeman.cfg")

    return True
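# A minimal, self-contained sketch of the crontab filtering pattern used above.
# The function name and the marker argument are hypothetical (they are not the
# installer's real starter entry): read "crontab -l", drop any line containing
# the marker, and reinstall the remaining lines with "crontab <tempfile>".
def remove_crontab_entry_sketch(marker):
  import subprocess
  import tempfile

  # Read the current crontab.  If there is none, "crontab -l" produces no output.
  crontab_stdout = subprocess.Popen(["crontab", "-l"],
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE).stdout

  # Copy every line except the ones we want to remove into a temp file.
  temp_crontab_file = tempfile.NamedTemporaryFile()
  entry_found = False
  for line in crontab_stdout:
    if marker in line:
      entry_found = True
    else:
      temp_crontab_file.write(line)

  # Install the filtered crontab only if we actually removed something.
  if entry_found:
    temp_crontab_file.flush()
    subprocess.Popen(["crontab", temp_crontab_file.name],
                     stdout=subprocess.PIPE,
                     stderr=subprocess.PIPE).wait()

  temp_crontab_file.close()
  return entry_found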
Example #24
0
def handle_threading_error():
    """
  <Purpose>
    Handles a repy node failing with ThreadErr. If repy is allowed to use
    more than 10% of the current threads, reduce the global thread count by 50%
    and stop all existing vessels

  <Arguments>
    None
  
  <Exceptions>
    None

  <Side Effects>
    May re-write all resource files and stop all vessels

  <Returns>
    None
  """
    # Make a log of this
    servicelogger.log(
        "[ERROR]:A Repy vessel has exited with ThreadErr status. Checking to determine next step"
    )

    # Get all the names of the vessels
    vesselnamelist = nmAPI.vesseldict.keys()

    # read in all of the resource files so that we can look at and possibly
    # manipulate them.
    resourcedicts = {}
    for vesselname in vesselnamelist:
        resourcedicts[vesselname] = \
            resourcemanipulation.read_resourcedict_from_file('resource.' + vesselname)

    # Get the number of threads Repy has allocated
    allowedthreadcount = 0
    for vesselname in vesselnamelist:
        allowedthreadcount += resourcedicts[vesselname]['events']

    # Get the total number of system threads currently in use
    totalusedthreads = nonportable.os_api.get_system_thread_count()

    # Log this information
    servicelogger.log("[WARNING]:System Threads: " + str(totalusedthreads) +
                      "  Repy Allocated Threads: " + str(allowedthreadcount))

    # Get the NM configuration
    configuration = persist.restore_object("nodeman.cfg")

    # Check if there is a threshold configuration,
    # otherwise add the default configuration
    if NOOP_CONFIG_KEY in configuration:
        threshold = configuration[NOOP_CONFIG_KEY]
    else:
        threshold = DEFAULT_NOOP_THRESHOLD
        configuration[NOOP_CONFIG_KEY] = threshold
        persist.commit_object(configuration, "nodeman.cfg")

    # Check if we are below the threshold, if so
    # then just return, this is a noop
    if allowedthreadcount < totalusedthreads * threshold:
        return
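    # Worked example (hypothetical numbers, assuming a threshold of 0.10): with
    # 40 allocated Repy threads and 300 system threads in use, 40 < 300 * 0.10
    # is False, so we fall through and halve the per-vessel 'events' allowances
    # below; with 25 allocated threads we would have returned above instead.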

    servicelogger.log("[ERROR]:Reducing number of system threads!")

    #### We are above the threshold!   Let's cut everything by 1/2

    # First, update the resource files
    for vesselname in vesselnamelist:
        # cut the events by 1/2
        resourcedicts[vesselname]['events'] = \
            resourcedicts[vesselname]['events'] / 2
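        # (Note: with Python 2 integer division, an odd event count rounds down.)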
        # write out the new resource files...
        resourcemanipulation.write_resourcedict_to_file(
            resourcedicts[vesselname], 'resource.' + vesselname)

    # Create the stop tuple, exit code 57 with an error message
    stoptuple = (57,
                 "Fatal system-wide threading error! Stopping all vessels.")

    # Stop each vessel
    for vesselname in vesselnamelist:
        try:
            # Stop each vessel, using our stoptuple
            nmAPI.stopvessel(vesselname, stoptuple)
        except Exception, exp:
            # Forge on, regardless of errors
            servicelogger.log(
                "[ERROR]:Failed to reset vessel (Handling ThreadErr). Exception: "
                + str(exp))
            servicelogger.log_last_exception()
Example #25
0
def main():
  global configuration

  if not FOREGROUND:
    # Background ourselves.
    daemon.daemonize()


  # Check if we are running in testmode.
  if TEST_NM:
    nodemanager_pid = os.getpid()
    servicelogger.log("[INFO]: Running nodemanager in test mode on port <nodemanager_port>, "+
                      "pid %s." % str(nodemanager_pid))
    nodeman_pid_file = open(os.path.join(os.getcwd(), 'nodemanager.pid'), 'w')
    
    # Write out the pid of the nodemanager process that we started to a file.
    # This is only done if the nodemanager was started in test mode.
    try:
      nodeman_pid_file.write(str(nodemanager_pid))
    finally:
      nodeman_pid_file.close()

  else:
    # ensure that only one instance is running at a time...
    gotlock = runonce.getprocesslock("seattlenodemanager")
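    # Judging from the checks below, getprocesslock returns True when the lock
    # was acquired, the owning process's pid when another instance holds it,
    # and a false value when the holder could not be determined.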

    if gotlock == True:
      # I got the lock.   All is well...
      pass
    else:
      if gotlock:
        servicelogger.log("[ERROR]:Another node manager process (pid: " + str(gotlock) + 
                        ") is running")
      else:
        servicelogger.log("[ERROR]:Another node manager process is running")
      return



  # Feature add for #1031: Log information about the system in the nm log...
  servicelogger.log('[INFO]:platform.python_version(): "' + 
    str(platform.python_version())+'"')
  servicelogger.log('[INFO]:platform.platform(): "' + 
    str(platform.platform())+'"')

  # uname on Android only yields 'Linux', let's be more specific.
  try:
    import android
    servicelogger.log('[INFO]:platform.uname(): Android / "' + 
      str(platform.uname())+'"')
  except ImportError:
    servicelogger.log('[INFO]:platform.uname(): "'+str(platform.uname())+'"')

  # I'll grab the necessary information first...
  servicelogger.log("[INFO]:Loading config")
  # BUG: Do this better?   Is this the right way to engineer this?
  configuration = persist.restore_object("nodeman.cfg")
  
  
  # Armon: initialize the network restrictions
  initialize_ip_interface_restrictions(configuration)
  
  
  
  # ZACK BOKA: For Linux and Darwin systems, check to make sure that the new
  #            seattle crontab entry has been installed in the crontab.
  #            Do this here because the "nodeman.cfg" needs to have been read
  #            into configuration via the persist module.
  if nonportable.ostype == 'Linux' or nonportable.ostype == 'Darwin':
    if 'crontab_updated_for_2009_installer' not in configuration or \
          configuration['crontab_updated_for_2009_installer'] == False:
      try:
        # crontab may not exist on Android, therefore let's not check
        # if we are running on Android. See #1302 and #1254.
        try:
          import android
        except ImportError:
          import update_crontab_entry
          modified_crontab_entry = \
              update_crontab_entry.modify_seattle_crontab_entry()
          # If updating the seattle crontab entry succeeded, then update the
          # 'crontab_updated_for_2009_installer' so the nodemanager no longer
          # tries to update the crontab entry when it starts up.
          if modified_crontab_entry:
            configuration['crontab_updated_for_2009_installer'] = True
            persist.commit_object(configuration,"nodeman.cfg")

      except Exception,e:
        exception_traceback_string = traceback.format_exc()
        servicelogger.log("[ERROR]: The following error occured when " \
                            + "modifying the crontab for the new 2009 " \
                            + "seattle crontab entry: " \
                            + exception_traceback_string)
Example #26
0
def handle_threading_error(nmAPI):
    """
  <Purpose>
    Handles a repy node failing with ThreadErr. Reduces the global thread count
    by 50% and stops all existing vessels.

  <Arguments>
    nmAPI: the nmAPI module -- passed to the function to avoid import loops;
           see ticket #590 for more information about this.
  """
    # Make a log of this
    servicelogger.log(
        "[ERROR]:A Repy vessel has exited with ThreadErr status. Patching restrictions and reseting all vessels."
    )

    # Get the number of threads Repy has allocated
    allocatedThreads = get_allocated_threads()

    # Get the number of system threads currently in use
    systemThreads = nonportable.os_api.get_system_thread_count()

    # Log this information
    servicelogger.log("[ERROR]:System Threads: " + str(systemThreads) +
                      "  Repy Allocated Threads: " + str(allocatedThreads))

    # Get the NM configuration
    configuration = persist.restore_object("nodeman.cfg")

    # Check if there is a threshold configuration,
    # otherwise add the default configuration
    if NOOP_CONFIG_KEY in configuration:
        threshold = configuration[NOOP_CONFIG_KEY]
    else:
        threshold = DEFAULT_NOOP_THRESHOLD
        configuration[NOOP_CONFIG_KEY] = threshold
        persist.commit_object(configuration, "nodeman.cfg")

    # Check if we are below the threshold, if so
    # then just return, this is a noop
    if allocatedThreads < systemThreads * threshold:
        return

    # We are continuing, so we are above the threshold!
    # First, update the restrictions
    update_restrictions()

    # Then, stop the vessels
    # Get all the vessels
    vessels = nmAPI.vesseldict.keys()

    # Create the stop tuple, exit code 57 with an error message
    stoptuple = (57,
                 "Fatal system-wide threading error! Stopping all vessels.")

    # Stop each vessel
    for vessel in vessels:
        try:
            # Stop each vessel, using our stoptuple
            nmAPI.stopvessel(vessel, stoptuple)
        except Exception, exp:
            # Forge on, regardless of errors
            servicelogger.log(
                "[ERROR]:Failed to reset vessel (Handling ThreadErr). Exception: "
                + str(exp))
            servicelogger.log_last_exception()