コード例 #1
0
def initialize_keys(keybitsize, nodemanager_directory="."):
  """
  Generate a fresh RSA keypair for this node and store it in nodeman.cfg.

  <Arguments>
    keybitsize: the bit size of the RSA keypair to generate.
    nodemanager_directory: The directory in which the nodeman.cfg file is
                           located (defaults to the current directory).

  <Side Effects>
    Overwrites the 'publickey' and 'privatekey' entries of nodeman.cfg in
    nodemanager_directory.  The caller's working directory is restored
    before returning.

  <Returns>
    None
  """
  # nodeman.cfg also holds other settings (pollfrequency, ports); only the
  # public and private keys are set here.

  curdir = os.getcwd()
  os.chdir(nodemanager_directory)

  try:
    # BUG FIX: the original read nodeman.cfg *before* changing into
    # nodemanager_directory, so the config was loaded from the caller's
    # working directory but written back into nodemanager_directory.
    # Restore it after the chdir so read and write use the same file.
    configuration = persist.restore_object('nodeman.cfg')

    keys = rsa.rsa_gen_pubpriv_keys(keybitsize)
    configuration['publickey'] = keys[0]
    configuration['privatekey'] = keys[1]

    persist.commit_object(configuration, "nodeman.cfg")
  finally:
    # always restore the caller's working directory, even on error
    os.chdir(curdir)
コード例 #2
0
def inject(key, value, cfile="nodeman.cfg"):
  # Read the stored configuration, set a single entry, and write it back.
  #
  #   key:   the configuration entry name to set.
  #   value: the value to store under that entry.
  #   cfile: the configuration file to update (default "nodeman.cfg").
  stored_config = persist.restore_object(cfile)

  # overwrite (or create) the requested entry
  stored_config[key] = value

  persist.commit_object(stored_config, cfile)
コード例 #3
0
def initialize_keys(keybitsize,nodemanager_directory="."):
  """
  Generate a fresh RSA keypair and store it in this node's nodeman.cfg.

  <Arguments>
    keybitsize: bit size for the generated RSA keypair.
    nodemanager_directory: The directory in which the nodeman.cfg file is
                           located (defaults to the current directory).

  <Side Effects>
    Rewrites the 'publickey' and 'privatekey' entries of nodeman.cfg in
    nodemanager_directory.  The process's working directory is restored
    before returning.

  <Returns>
    None
  """
  # nodeman.cfg also holds pollfrequency and ports; only the keys are
  # touched here.

  curdir = os.getcwd()
  os.chdir(nodemanager_directory)

  try:
    # BUG FIX: previously the config was restored *before* chdir'ing into
    # nodemanager_directory, so nodeman.cfg was read from the caller's
    # working directory but written into nodemanager_directory.
    configuration = persist.restore_object('nodeman.cfg')

    keys = rsa_gen_pubpriv_keys(keybitsize)
    configuration['publickey'] = keys[0]
    configuration['privatekey'] = keys[1]

    persist.commit_object(configuration,"nodeman.cfg")
  finally:
    # restore the caller's working directory even if something failed
    os.chdir(curdir)
コード例 #4
0
ファイル: nmAPI.py プロジェクト: JakobKallin/Seastorm-Preview
def joinvessels(vesselname1, vesselname2):
  if vesselname1 not in vesseldict:
    raise BadRequest, "No such vessel '"+vesselname1+"'"
  if vesselname2 not in vesseldict:
    raise BadRequest, "No such vessel '"+vesselname2+"'"

  if vesseldict[vesselname1]['ownerkey'] != vesseldict[vesselname2]['ownerkey']:
    raise BadRequest("Vessels must have the same owner")
  
  if vesseldict[vesselname1]['status'] == 'Started' or vesseldict[vesselname2]['status'] == 'Started':
    raise BadRequest("Attempting to join a running vessel")
  
  # get the new name
  newname = get_new_vessel_name()


  currentresourcedict1, call_list_v1 = resourcemanipulation.read_resourcedict_from_file(vesseldict[vesselname1]['resourcefilename'])
  currentresourcedict2, call_list_v2 = resourcemanipulation.read_resourcedict_from_file(vesseldict[vesselname2]['resourcefilename'])
  offcutresourcedict, offcut_call_list = resourcemanipulation.read_resourcedict_from_file(offcutfilename)


  # the final resources are reconstructed from the offcut + vessel1 + vessel2
  intermediateresourcedict = resourcemanipulation.add_resourcedicts(currentresourcedict1, currentresourcedict2)

  finalresourcedict = resourcemanipulation.add_resourcedicts(intermediateresourcedict, offcutresourcedict)

  # MMM: We add in the call list of resources from one of the original 
  # resource file. Since this is for backward compatibility for Repy V1,
  # it does not matter which call list we add in to the final resource file.
  _setup_vessel(newname, vesselname1, finalresourcedict, call_list_v1)
  _destroy_vessel(vesselname1)
  _destroy_vessel(vesselname2)
    
  persist.commit_object(vesseldict, "vesseldict")
  return newname+"\nSuccess"
コード例 #5
0
ファイル: nmAPI.py プロジェクト: pinaksaha/reference_monitor
def joinvessels(vesselname1, vesselname2):
  if vesselname1 not in vesseldict:
    raise BadRequest, "No such vessel '"+vesselname1+"'"
  if vesselname2 not in vesseldict:
    raise BadRequest, "No such vessel '"+vesselname2+"'"

  if vesseldict[vesselname1]['ownerkey'] != vesseldict[vesselname2]['ownerkey']:
    raise BadRequest("Vessels must have the same owner")
  
  if vesseldict[vesselname1]['status'] == 'Started' or vesseldict[vesselname2]['status'] == 'Started':
    raise BadRequest("Attempting to join a running vessel")
  
  # get the new name
  newname = get_new_vessel_name()


  currentresourcedict1 = resourcemanipulation.read_resourcedict_from_file(vesseldict[vesselname1]['resourcefilename'])
  currentresourcedict2 = resourcemanipulation.read_resourcedict_from_file(vesseldict[vesselname2]['resourcefilename'])
  offcutresourcedict = resourcemanipulation.read_resourcedict_from_file(offcutfilename)


  # the final resources are reconstructed from the offcut + vessel1 + vessel2
  intermediateresourcedict = resourcemanipulation.add_resourcedicts(currentresourcedict1, currentresourcedict2)

  finalresourcedict = resourcemanipulation.add_resourcedicts(intermediateresourcedict, offcutresourcedict)

  _setup_vessel(newname, vesselname1, finalresourcedict)
  _destroy_vessel(vesselname1)
  _destroy_vessel(vesselname2)
    
  persist.commit_object(vesseldict, "vesseldict")
  return newname+"\nSuccess"
コード例 #6
0
ファイル: nminit.py プロジェクト: SeattleTestbed/nodemanager
def initialize_state():

  # first, let's clean up any existing directory data...
  for vesseldirectoryname in glob.glob('v[0-9]*'):
    if os.path.isdir(vesseldirectoryname):
      print 'Removing:',vesseldirectoryname
      shutil.rmtree(vesseldirectoryname)

  # initialize my configuration file.   This involves a few variables:
  #    seattle_installed -- signal that the installation succeeded
  #    pollfrequency --  the amount of time to sleep after a check when "busy
  #                      waiting".   This trades CPU load for responsiveness.
  #    ports         --  the ports the node manager could listen on.
  #    publickey     --  the public key used to identify the node...
  #    privatekey    --  the corresponding private key for the node...
  configuration = {}

  configuration['pollfrequency'] = 1.0

  # NOTE: I chose these randomly (they will be uniform across all NMs)...   
  # Was this wise?
  configuration['ports'] = [1224, 2888, 9625, 10348, 39303, 48126, 52862, 57344, 64310]

  print "Generating key..."
  keys = rsa_gen_pubpriv_keys(100)
  configuration['seattle_installed'] = True
  configuration['publickey'] = keys[0]
  configuration['privatekey'] = keys[1]
  configuration['service_vessel'] = 'v2'

  print "Writing config file..."
  # write the config file...
  persist.commit_object(configuration,"nodeman.cfg")

  # write the offcut file...
  outfo = open("resources.offcut","w")
  outfo.write(offcutresourcedata)
  outfo.close()

#  vessel1 = make_vessel('v1',controllerpubkey,bigresourcedata, []) 
  vessel1 = make_vessel('v1',controllerpubkey,smallresourcedata, ('12345','12346', '12347','12348','12345','12346','12347','12348')) 
  vessel2 = make_vessel('v2',justinpubkey,smallresourcedata, ('20000','20001', '20002','20003','20000','20001','20002','20003')) 
  vessel3 = make_vessel('v3',guest0pubkey,smallresourcedata, ('30000','30001', '30002','30003','30000','30001','30002','30003')) 
  vessel4 = make_vessel('v4',guest0pubkey,smallresourcedata, ('21000','21001', '21002','21003','21000','21001','21002','21003')) 
  vessel5 = make_vessel('v5',guest1pubkey,smallresourcedata, ('22000','22001', '22002','22003','22000','22001','22002','22003')) 
  vessel6 = make_vessel('v6',guest1pubkey,smallresourcedata, ('23000','23001', '23002','23003','23000','23001','23002','23003')) 
  vessel7 = make_vessel('v7',guest2pubkey,smallresourcedata, ('24000','24001', '24002','24003','24000','24001','24002','24003')) 
  vessel8 = make_vessel('v8',guest2pubkey,smallresourcedata, ('25000','25001', '25002','25003','25000','25001','25002','25003')) 
  vessel9 = make_vessel('v9',guest3pubkey,smallresourcedata, ('26000','26001', '26002','26003','26000','26001','26002','26003')) 
  vessel10 = make_vessel('v10',guest3pubkey,smallresourcedata, ('27000','27001', '27002','27003','27000','27001','27002','27003')) 
  

  vesseldict = {'v1':vessel1, 'v2':vessel2, 'v3':vessel3, 'v4':vessel4, 'v5':vessel5, 'v6':vessel6, 'v7':vessel7, 'v8':vessel8, 'v9':vessel9, 'v10':vessel10}

  print "Writing vessel dictionary..."
  # write out the vessel dictionary...
  persist.commit_object(vesseldict,"vesseldict")

  print "Done initializing state."
コード例 #7
0
ファイル: nmmain.py プロジェクト: Ashmita89/attic
def main():
  # Node manager entry point: daemonize (unless running in the
  # foreground), acquire the singleton process lock, load nodeman.cfg,
  # apply network restrictions, and on Linux/Darwin make sure the 2009
  # seattle crontab entry has been installed.

  global configuration

  if not FOREGROUND:
    # Background ourselves.
    daemon.daemonize()

  # ensure that only one instance is running at a time...
  gotlock = runonce.getprocesslock("seattlenodemanager")
  if gotlock == True:
    # I got the lock.   All is well...
    pass
  else:
    # NOTE(review): a truthy value other than True appears to be the pid
    # of the process already holding the lock (it is logged as "pid:"
    # below) -- confirm against runonce.getprocesslock.
    if gotlock:
      servicelogger.log("[ERROR]:Another node manager process (pid: " + str(gotlock) + 
          ") is running")
    else:
      servicelogger.log("[ERROR]:Another node manager process is running")
    return

  
  # I'll grab the necessary information first...
  servicelogger.log("[INFO]:Loading config")
  # BUG: Do this better?   Is this the right way to engineer this?
  configuration = persist.restore_object("nodeman.cfg")
  
  # Armon: initialize the network restrictions
  initialize_ip_interface_restrictions(configuration)
  
  
  
  # ZACK BOKA: For Linux and Darwin systems, check to make sure that the new
  #            seattle crontab entry has been installed in the crontab.
  #            Do this here because the "nodeman.cfg" needs to have been read
  #            into configuration via the persist module.
  if nonportable.ostype == 'Linux' or nonportable.ostype == 'Darwin':
    # Only attempt the crontab update once: skip it when the config flag
    # is already True.
    if 'crontab_updated_for_2009_installer' not in configuration or \
          configuration['crontab_updated_for_2009_installer'] == False:
      try:
        import update_crontab_entry
        modified_crontab_entry = \
            update_crontab_entry.modify_seattle_crontab_entry()
        # If updating the seattle crontab entry succeeded, then update the
        # 'crontab_updated_for_2009_installer' so the nodemanager no longer
        # tries to update the crontab entry when it starts up.
        if modified_crontab_entry:
          configuration['crontab_updated_for_2009_installer'] = True
          persist.commit_object(configuration,"nodeman.cfg")

      except Exception,e:
        # A crontab failure is logged but does not stop the node manager.
        exception_traceback_string = traceback.format_exc()
        servicelogger.log("[ERROR]: The following error occured when " \
                            + "modifying the crontab for the new 2009 " \
                            + "seattle crontab entry: " \
                            + exception_traceback_string)
コード例 #8
0
ファイル: nminit.py プロジェクト: SeattleTestbed/attic
def initialize_state():
  # Build a brand new node state: remove stale vessel directories, write
  # nodeman.cfg, the offcut resource file, the ten initial vessels and the
  # vessel dictionary.
  #
  # NOTE(review): this is a template file -- <nodemanager_port> below is a
  # placeholder that must be substituted before this code can run
  # (presumably by a test-preparation script; confirm with the build).

  # first, let's clean up any existing directory data...
  for vesseldirectoryname in glob.glob('v[0-9]*'):
    if os.path.isdir(vesseldirectoryname):
      print 'Removing:',vesseldirectoryname
      shutil.rmtree(vesseldirectoryname)

  # initialize my configuration file.   This involves a few variables:
  #    pollfrequency --  the amount of time to sleep after a check when "busy
  #                      waiting".   This trades CPU load for responsiveness.
  #    ports         --  the ports the node manager could listen on.
  #    publickey     --  the public key used to identify the node...
  #    privatekey    --  the corresponding private key for the node...
  configuration = {}

  configuration['pollfrequency'] = 1.0

  # NOTE: I chose these randomly (they will be uniform across all NMs)...   
  # Was this wise?
  configuration['ports'] = [<nodemanager_port>, 2888, 9625, 10348, 39303, 48126, 52862, 57344, 64310]

  print "Generating key..."
  # 100-bit keys: far too small for real security, acceptable for tests.
  keys = rsa_gen_pubpriv_keys(100)
  configuration['publickey'] = keys[0]
  configuration['privatekey'] = keys[1]
  configuration['service_vessel'] = 'v2'

  print "Writing config file..."
  # write the config file...
  persist.commit_object(configuration,"nodeman.cfg")

  # write the offcut file...
  outfo = open("resources.offcut","w")
  outfo.write(offcutresourcedata)
  outfo.close()

  # Create the ten initial vessels: each gets four consecutive ports,
  # listed twice in the port tuple.
#  vessel1 = make_vessel('v1',controllerpubkey,bigresourcedata, []) 
  vessel1 = make_vessel('v1',controllerpubkey,smallresourcedata, ('12345','12346', '12347','12348','12345','12346','12347','12348')) 
  vessel2 = make_vessel('v2',justinpubkey,smallresourcedata, ('20000','20001', '20002','20003','20000','20001','20002','20003')) 
  vessel3 = make_vessel('v3',guest0pubkey,smallresourcedata, ('30000','30001', '30002','30003','30000','30001','30002','30003')) 
  vessel4 = make_vessel('v4',guest0pubkey,smallresourcedata, ('21000','21001', '21002','21003','21000','21001','21002','21003')) 
  vessel5 = make_vessel('v5',guest1pubkey,smallresourcedata, ('22000','22001', '22002','22003','22000','22001','22002','22003')) 
  vessel6 = make_vessel('v6',guest1pubkey,smallresourcedata, ('23000','23001', '23002','23003','23000','23001','23002','23003')) 
  vessel7 = make_vessel('v7',guest2pubkey,smallresourcedata, ('24000','24001', '24002','24003','24000','24001','24002','24003')) 
  vessel8 = make_vessel('v8',guest2pubkey,smallresourcedata, ('25000','25001', '25002','25003','25000','25001','25002','25003')) 
  vessel9 = make_vessel('v9',guest3pubkey,smallresourcedata, ('26000','26001', '26002','26003','26000','26001','26002','26003')) 
  vessel10 = make_vessel('v10',guest3pubkey,smallresourcedata, ('27000','27001', '27002','27003','27000','27001','27002','27003')) 
  

  vesseldict = {'v1':vessel1, 'v2':vessel2, 'v3':vessel3, 'v4':vessel4, 'v5':vessel5, 'v6':vessel6, 'v7':vessel7, 'v8':vessel8, 'v9':vessel9, 'v10':vessel10}

  print "Writing vessel dictionary..."
  # write out the vessel dictionary...
  persist.commit_object(vesseldict,"vesseldict")
コード例 #9
0
ファイル: nmAPI.py プロジェクト: JakobKallin/Seastorm-Preview
def changeadvertise(vesselname, setting):
  if vesselname not in vesseldict:
    raise BadRequest, "No such vessel"
  if setting == 'True':
    vesseldict[vesselname]['advertise'] = True
  elif setting == 'False':
    vesseldict[vesselname]['advertise'] = False
  else: 
    raise BadRequest("Invalid advertisement setting '"+setting+"'")

  persist.commit_object(vesseldict, "vesseldict")
  return "\nSuccess"
コード例 #10
0
def changeadvertise(vesselname, setting):
    if vesselname not in vesseldict:
        raise BadRequest, "No such vessel"
    if setting == 'True':
        vesseldict[vesselname]['advertise'] = True
    elif setting == 'False':
        vesseldict[vesselname]['advertise'] = False
    else:
        raise BadRequest("Invalid advertisement setting '" + setting + "'")

    persist.commit_object(vesseldict, "vesseldict")
    return "\nSuccess"
コード例 #11
0
ファイル: nmAPI.py プロジェクト: JakobKallin/Seastorm-Preview
def changeownerinformation(vesselname, ownerstring):
  if vesselname not in vesseldict:
    raise BadRequest, "No such vessel"
  if len(ownerstring) > maxownerstringlength:
    raise BadRequest("String Too Long")

  if '\n' in ownerstring:
    raise BadRequest("String may not contain newline character")

  vesseldict[vesselname]['ownerinformation'] = ownerstring

  persist.commit_object(vesseldict, "vesseldict")
  return "\nSuccess"
コード例 #12
0
def changeownerinformation(vesselname, ownerstring):
    if vesselname not in vesseldict:
        raise BadRequest, "No such vessel"
    if len(ownerstring) > maxownerstringlength:
        raise BadRequest("String Too Long")

    if '\n' in ownerstring:
        raise BadRequest("String may not contain newline character")

    vesseldict[vesselname]['ownerinformation'] = ownerstring

    persist.commit_object(vesseldict, "vesseldict")
    return "\nSuccess"
コード例 #13
0
def joinvessels(vesselname1, vesselname2):
    if vesselname1 not in vesseldict:
        raise BadRequest, "No such vessel '" + vesselname1 + "'"
    if vesselname2 not in vesseldict:
        raise BadRequest, "No such vessel '" + vesselname2 + "'"

    if vesseldict[vesselname1]['ownerkey'] != vesseldict[vesselname2][
            'ownerkey']:
        raise BadRequest("Vessels must have the same owner")

    if vesseldict[vesselname1]['status'] == 'Started' or vesseldict[
            vesselname2]['status'] == 'Started':
        raise BadRequest("Attempting to join a running vessel")

    # get the new name
    newname = get_new_vessel_name()

    currentresourcedict1, call_list_v1 = resourcemanipulation.read_resourcedict_from_file(
        vesseldict[vesselname1]['resourcefilename'])
    currentresourcedict2, call_list_v2 = resourcemanipulation.read_resourcedict_from_file(
        vesseldict[vesselname2]['resourcefilename'])
    offcutresourcedict, offcut_call_list = resourcemanipulation.read_resourcedict_from_file(
        offcutfilename)

    # the final resources are reconstructed from the offcut + vessel1 + vessel2
    intermediateresourcedict = resourcemanipulation.add_resourcedicts(
        currentresourcedict1, currentresourcedict2)

    finalresourcedict = resourcemanipulation.add_resourcedicts(
        intermediateresourcedict, offcutresourcedict)

    # MMM: We add in the call list of resources from one of the original
    # resource file. Since this is for backward compatibility for Repy V1,
    # it does not matter which call list we add in to the final resource file.
    _setup_vessel(newname, vesselname1, finalresourcedict, call_list_v1)
    _destroy_vessel(vesselname1)
    _destroy_vessel(vesselname2)

    persist.commit_object(vesseldict, "vesseldict")
    return newname + "\nSuccess"
コード例 #14
0
ファイル: nmAPI.py プロジェクト: JakobKallin/Seastorm-Preview
def changeowner(vesselname, newkeystring):
  if vesselname not in vesseldict:
    raise BadRequest, "No such vessel"
  try:
    newkey = rsa_string_to_publickey(newkeystring)
  except ValueError:
    raise BadRequest("Invalid Key String")
    
  # check the key 
  if not rsa_is_valid_publickey(newkey):
    raise BadRequest("Invalid Key")

  vesseldict[vesselname]['ownerkey'] = newkey 

  # Must reset the owner information because it's used for service security.
  vesseldict[vesselname]['ownerinformation'] = ''

  # Reset the advertise flag so the owner can find the node...
  vesseldict[vesselname]['advertise'] = True

  persist.commit_object(vesseldict, "vesseldict")
  return "\nSuccess"
コード例 #15
0
def changeowner(vesselname, newkeystring):
    if vesselname not in vesseldict:
        raise BadRequest, "No such vessel"
    try:
        newkey = rsa_string_to_publickey(newkeystring)
    except ValueError:
        raise BadRequest("Invalid Key String")

    # check the key
    if not rsa_is_valid_publickey(newkey):
        raise BadRequest("Invalid Key")

    vesseldict[vesselname]['ownerkey'] = newkey

    # Must reset the owner information because it's used for service security.
    vesseldict[vesselname]['ownerinformation'] = ''

    # Reset the advertise flag so the owner can find the node...
    vesseldict[vesselname]['advertise'] = True

    persist.commit_object(vesseldict, "vesseldict")
    return "\nSuccess"
コード例 #16
0
ファイル: nmAPI.py プロジェクト: JakobKallin/Seastorm-Preview
def changeusers(vesselname, listofkeysstring):
  if vesselname not in vesseldict:
    raise BadRequest, "No such vessel"
  newkeylist = []

  # check the keys
  for keystring in listofkeysstring.split('|'):
    if keystring == '':
      continue

    try:
      newkey = rsa_string_to_publickey(keystring)
    except ValueError:
      raise BadRequest("Invalid Key String '"+keystring+"'")

    if not rsa_is_valid_publickey(newkey):
      raise BadRequest("Invalid Key '"+keystring+"'")
    
    newkeylist.append(newkey)

  vesseldict[vesselname]['userkeys'] = newkeylist
    
  persist.commit_object(vesseldict, "vesseldict")
  return "\nSuccess"
コード例 #17
0
def changeusers(vesselname, listofkeysstring):
    if vesselname not in vesseldict:
        raise BadRequest, "No such vessel"
    newkeylist = []

    # check the keys
    for keystring in listofkeysstring.split('|'):
        if keystring == '':
            continue

        try:
            newkey = rsa_string_to_publickey(keystring)
        except ValueError:
            raise BadRequest("Invalid Key String '" + keystring + "'")

        if not rsa_is_valid_publickey(newkey):
            raise BadRequest("Invalid Key '" + keystring + "'")

        newkeylist.append(newkey)

    vesseldict[vesselname]['userkeys'] = newkeylist

    persist.commit_object(vesseldict, "vesseldict")
    return "\nSuccess"
コード例 #18
0
def main(vesselcreationlist, tenpercentdict, targetdirectory):
    """
  <Purpose>
    To take the vesselcreationlist (list containing owner and user info)
    and generate the initial resource files and directories. 
    
    Working Definition: 
      percentage - in this method we are dealing with how to split up the 
      donated resources (this could 10% of the system's available resources, 
      or it could be a user defined percentage but regardless we only 
      consider the portion donated). So any time we refer to percentages 
      in this method we are talking about the percentage of the donated 
      resources. This applies to the tenpercentdict that is passed as an
      argument (it is only ten percent of the donated resources).
    
    This method does not special case the seattle vessel, it only creates
    vessels based off what is in the vesselcreationlist list (the original
    writecustominstaller handled the seattle vessel separately).
    
  <Arguments>
    vesselcreationlist: a list that has the following format:
    [(vessel percent, ownerpubkeyfilename, list of user pubkeyfilenames),
      (vessel percent, ownerpubkeyfilename, list of user pubkeyfilenames),...]
      It contains the information for the initial vessels of the node.

    tenpercentdict: a resource dictionary representing ten percent of the
      donated resources; used as the unit from which vessel resource
      files are built.
     
    targetdirectory: a string containing the directory where the 
      restrictions files and vesseldict will be placed.
     
  <Exceptions>
    ValueError: if the vessel percent (from element 0 of a tuple in the 
      vesselcreationlist list) is not 10,20,30, ... , 80, 90, 100
      A ValueError will also be raised if the total percent (of all
      the vessel percents in the vesselcreationlist list) does not add
      up to the integer 100

    IOError: if vessel.restrictions is not found, this must be included
      ahead of time in the target directory.
  
    OSError, WindowsError: if os.mkdir is unable to create the vessel
      directories.
      
  <Side Effects>
    Creates the vesseldict using persist.commit_object.
    Creates resource files for each vessel with names of the form
      resource.v1, resource.v2, ...
    Creates a directory for each vessel with a name of the form v1, v2, ...
      
  <Return>
    None
  
  <Notes>   
    The vesselcreationlist is a list that has the following format:
    [(vessel percent, ownerpubkeyfilename, list of user pubkeyfilenames),
     (vessel percent, ownerpubkeyfilename, list of user pubkeyfilenames), ...]
  
    So it might look like:
      [(10, 'joe.publickey', []),
       (50, 'jim.publickey', ['alice.publickey', 'bob.publickey']), 
       (40, 'alice.publickey', []) ]
  
    This would indicate v1 is owned by joe and gets 10 percent, v2 is owned
    by jim has users alice and bob and gets 50 percent, v3 is owned by
    alice and gets 40 percent.   I'll track the seattle information and
    will write it independently.
  
  <Notes>
    The structure of this method can be described in 3 distinct sections.
    1) The vesseldict is constructed from the information in vesselcreationlist,
       it contains the public keys and other vessel information.
    2) The resource files are written to files for all the vessels
    3) Directories are created for all the vessels.
  """

    # Check to ensure that the vessel information is in the correct format.
    # Each vessel percent must be an integer multiple of ten from (0,100]
    # and the total must be 100. This is checked here instead of when the
    # list is originally constructed from the vesselinfo file so that only
    # this method must be changed if the granularity of the possible vessel
    # percentages is changed.
    total = 0

    for item in vesselcreationlist:
        if item[0] not in range(10, 101, 10):
            raise ValueError, "Invalid vessel percent '" + str(item[0]) + "'"
        total = total + item[0]

    if total != 100:
        raise ValueError, "Total of vessel percentage is '" + str(
            total) + "', not 100."

    # --- Section 1: build the vessel dictionary ------------------------
    vesseldict = {}
    vesselnumber = 1
    # time to work on the vessel dictionary...
    for item in vesselcreationlist:

        vesselname = 'v' + str(vesselnumber)

        # item[1] is the owner pubkey filename, item[2] the user pubkey
        # filenames; the per-vessel filenames are all derived from the
        # vessel name.
        vesseldict['v' + str(vesselnumber)] = {
            'userkeys': item[2],
            'ownerkey': item[1],
            'oldmetadata': None,
            'stopfilename': vesselname + '.stop',
            'logfilename': vesselname + '.log',
            'statusfilename': vesselname + '.status',
            'resourcefilename': 'resource.' + vesselname,
            'advertise': True,
            'ownerinformation': '',
            'status': 'Fresh'
        }
        vesselnumber = vesselnumber + 1

    persist.commit_object(vesseldict, targetdirectory + "/vesseldict")

    # --- Section 2: write the per-vessel resource files ----------------
    # I'm going to do the resources / restrictions now...
    onetenth = tenpercentdict.copy()

    # These are the restrictions that apply to all vessels, they are
    # not a resource we measure.
    restrictionsfo = file('vessel.restrictions')
    restrictionsstring = restrictionsfo.read()
    restrictionsfo.close()

    # I'll use this to figure out which ports to assign
    usedpercent = 0
    vesselnumber = 1

    for item in vesselcreationlist:

        # the percentcount variable is slightly confusing, up until we have talked
        # about vessels as having 10 or 20 or ... percent of the donated resources.
        # We restrict vessels to being a multiple of ten so that we do not have to
        # cut down the donated resources too far (this is an attempt to keep things
        # simple and avoid getting resources at zero).
        # percentcount should be an integer between 0 and 10
        percentcount = item[0] / 10
        # make a resource file of the right size by summing `percentcount`
        # copies of the ten-percent dictionary
        size = percentcount
        thisresourcedata = tenpercentdict.copy()
        while size > 1:
            thisresourcedata = nmresourcemath.add(thisresourcedata, onetenth)
            size = size - 1

        # I need the ports...
        startpercent = usedpercent
        endpercent = usedpercent + percentcount
        # a yucky way of getting the ports.   Should do 63100-63109 for the first,
        # 63110-63119 for the second, etc.
        thisresourcedata['messport'] = set(
            range(63100 + 10 * startpercent, 63100 + 10 * endpercent))
        thisresourcedata['connport'] = set(
            range(63100 + 10 * startpercent, 63100 + 10 * endpercent))

        nmresourcemath.write_resource_dict(
            thisresourcedata,
            targetdirectory + "/resource.v" + str(vesselnumber))

        # append the restrictions data.
        restrictionsfo = file(
            targetdirectory + '/resource.v' + str(vesselnumber), "a")
        restrictionsfo.write(restrictionsstring)
        restrictionsfo.close()

        # increment the vesselnumber and used percent
        vesselnumber = vesselnumber + 1
        usedpercent = usedpercent + percentcount

    # Get the directory, if any, that is used for security layers.
    configuration = persist.restore_object("nodeman.cfg")
    repy_prepend_dir = None
    if 'repy_prepend_dir' in configuration:
        repy_prepend_dir = configuration['repy_prepend_dir']

    # Get the list of files in the security layer directory
    repy_prepend_files = []
    if repy_prepend_dir is not None:
        repy_prepend_files = os.listdir(repy_prepend_dir)

    # --- Section 3: create the vessel directories ----------------------
    # make the directories...
    for num in range(len(vesselcreationlist)):
        vesselname = 'v' + str(num + 1)
        # Probe for the WindowsError name to decide which exception types
        # os.mkdir may raise on this platform.
        try:
            WindowsError

        except NameError:  # not on windows...
            # make the vessel dirs...
            try:
                os.mkdir(targetdirectory + "/" + vesselname)
            except OSError, e:
                if e[0] == 17:
                    # errno 17 (EEXIST): directory exists
                    pass
                else:
                    raise

        else:  # on Windows...

            # make the vessel dirs...
            try:
                os.mkdir(targetdirectory + "/" + vesselname)
            except (OSError, WindowsError), e:
                if e[0] == 17 or e[0] == 183:
                    # errno 17 (EEXIST) / Windows 183 (ERROR_ALREADY_EXISTS)
                    pass
                else:
                    raise
コード例 #19
0
ファイル: nminit_test.py プロジェクト: BuildSeash/seash
def initialize_state():
  """
  Build a fresh test-mode node manager state from scratch.

  Removes any leftover vessel directories, writes a new nodeman.cfg
  (poll frequency, listen ports, owner keypair, service vessel), writes
  the resources.offcut file, builds ten vessels via make_vessel, and
  commits the resulting vesseldict.

  NOTE(review): relies on module-level names defined elsewhere in this
  file (glob, persist, rsa_gen_pubpriv_keys, make_vessel, the *pubkey
  globals, smallresourcedata, offcutresourcedata).
  """

  # first, let's clean up any existing directory data...
  for vesseldirectoryname in glob.glob('v[0-9]*'):
    if os.path.isdir(vesseldirectoryname):
      print 'Removing:',vesseldirectoryname
      shutil.rmtree(vesseldirectoryname)

  # initialize my configuration file.   This involves a few variables:
  #    pollfrequency --  the amount of time to sleep after a check when "busy
  #                      waiting".   This trades CPU load for responsiveness.
  #    ports         --  the ports the node manager could listen on.
  #    publickey     --  the public key used to identify the node...
  #    privatekey    --  the corresponding private key for the node...
  configuration = {}

  configuration['pollfrequency'] = 1.0

  # NOTE: I chose these randomly (they will be uniform across all NMs)...   
  # Was this wise?

  # Modified By Monzur on Dec 2, 2010. Changed the first port to the tag
  # nodemanager_port tag. Usually this will be replaced by the port number
  # 1224 by preparetest.py. If --random-ports option is used when running
  # preparetest.py, then the first port will be a random port. This will 
  # allow us to run multiple nodemanagers for testing purposes.
  # NOTE(review): <nodemanager_port> is a preprocessor placeholder -- this
  # file is NOT valid Python until preparetest.py substitutes a real port.
  configuration['ports'] = [<nodemanager_port>, 2888, 9625, 10348, 39303, 48126, 52862, 57344, 64310]

  print "Generating owner key..."
  # 100-bit key: deliberately tiny because this is test-only setup.
  keys = rsa_gen_pubpriv_keys(100)
  configuration['publickey'] = keys[0]
  configuration['privatekey'] = keys[1]
  configuration['service_vessel'] = 'v2'

  print "Writing config file..."
  # write the config file...
  persist.commit_object(configuration,"nodeman.cfg")

  # write the offcut file...
  outfo = open("resources.offcut","w")
  outfo.write(offcutresourcedata)
  outfo.close()

  # Build the ten test vessels.  Each call gets (name, owner key, resource
  # data, port tuple).
  # NOTE(review): each port tuple repeats its four ports twice -- presumably
  # one set for mess ports and one for conn ports; confirm against
  # make_vessel's signature.
#  vessel1 = make_vessel('v1',controllerpubkey,bigresourcedata, []) 
  vessel1 = make_vessel('v1',controllerpubkey,smallresourcedata, ('12345','12346', '12347','12348','12345','12346','12347','12348')) 
  vessel2 = make_vessel('v2',justinpubkey,smallresourcedata, ('20000','20001', '20002','20003','20000','20001','20002','20003')) 
  vessel3 = make_vessel('v3',guest0pubkey,smallresourcedata, ('30000','30001', '30002','30003','30000','30001','30002','30003')) 
  vessel4 = make_vessel('v4',guest0pubkey,smallresourcedata, ('21000','21001', '21002','21003','21000','21001','21002','21003')) 
  vessel5 = make_vessel('v5',guest1pubkey,smallresourcedata, ('22000','22001', '22002','22003','22000','22001','22002','22003')) 
  vessel6 = make_vessel('v6',guest1pubkey,smallresourcedata, ('23000','23001', '23002','23003','23000','23001','23002','23003')) 
  vessel7 = make_vessel('v7',guest2pubkey,smallresourcedata, ('24000','24001', '24002','24003','24000','24001','24002','24003')) 
  vessel8 = make_vessel('v8',guest2pubkey,smallresourcedata, ('25000','25001', '25002','25003','25000','25001','25002','25003')) 
  vessel9 = make_vessel('v9',guest3pubkey,smallresourcedata, ('26000','26001', '26002','26003','26000','26001','26002','26003')) 
  vessel10 = make_vessel('v10',guest3pubkey,smallresourcedata, ('27000','27001', '27002','27003','27000','27001','27002','27003')) 
  

  vesseldict = {'v1':vessel1, 'v2':vessel2, 'v3':vessel3, 'v4':vessel4, 'v5':vessel5, 'v6':vessel6, 'v7':vessel7, 'v8':vessel8, 'v9':vessel9, 'v10':vessel10}

  print "Writing vessel dictionary..."
  # write out the vessel dictionary...
  persist.commit_object(vesseldict,"vesseldict")
コード例 #20
0
                            + "modifying the crontab for the new 2009 " \
                            + "seattle crontab entry: " \
                            + exception_traceback_string)
  
  if LOCAL_MODE:
    servicelogger.log('[LOCAL]: Operating in local_mode.')
    nmrequesthandler.LOCAL_MODE = LOCAL_MODE
    fastsigneddata.LOCAL_MODE = LOCAL_MODE

    if 'local_mode' not in configuration:
      configuration['local_mode'] = True
      servicelogger.log('[LOCAL]: Adding local_mode to nodeman.cfg!')
    else:
      configuration['local_mode'] = True
      servicelogger.log('[LOCAL]: Setting local_mode to True in nodeman.cfg')
    persist.commit_object(configuration, "nodeman.cfg")

    tempVesselDict = persist.restore_object("vesseldict")
    for vesselname in tempVesselDict.keys()[:]:
      thisentry = tempVesselDict[vesselname]
      thisentry['local_mode'] = True
    persist.commit_object(tempVesselDict, "vesseldict")

  else:
    configuration['local_mode'] = False
    servicelogger.log('[LOCAL]: Setting local_mode to false...')
    persist.commit_object(configuration, "nodeman.cfg")

    tempVesselDict = persist.restore_object("vesseldict")
    for vesselname in tempVesselDict.keys()[:]:
      thisentry = tempVesselDict[vesselname]
コード例 #21
0
ファイル: nmAPI.py プロジェクト: JakobKallin/Seastorm-Preview
  except resourcemanipulation.ResourceMathError, e:
    raise BadRequest('Existing resources are so small they cannot be split.\n'+str(e))

  # now let's remove the proposed bits!
  try:
    finalresourcedict = resourcemanipulation.subtract_resourcedicts(intermediateresourcedict,proposedresourcedict)
  except resourcemanipulation.ResourceMathError, e:
    raise BadRequest('Proposed vessel is too large.\n'+str(e))

  # newname1 becomes the leftovers...
  _setup_vessel(newname1, vesselname, finalresourcedict, call_list_vessel)
  # newname2 is what the user requested
  _setup_vessel(newname2, vesselname, proposedresourcedict, call_list_vessel)
  _destroy_vessel(vesselname)
    
  persist.commit_object(vesseldict, "vesseldict")
  return newname1+" "+newname2+"\nSuccess"

    
  
  

def joinvessels(vesselname1, vesselname2):
  if vesselname1 not in vesseldict:
    raise BadRequest, "No such vessel '"+vesselname1+"'"
  if vesselname2 not in vesseldict:
    raise BadRequest, "No such vessel '"+vesselname2+"'"

  if vesseldict[vesselname1]['ownerkey'] != vesseldict[vesselname2]['ownerkey']:
    raise BadRequest("Vessels must have the same owner")
  
コード例 #22
0
ファイル: nmthreadingerror.py プロジェクト: Ashmita89/attic
def handle_threading_error():
  """
  <Purpose>
    Handles a repy node failing with ThreadErr. If repy is allowed to use
    more than 10% of the current threads, reduce the global thread count by 50%
    and stop all existing vessels

  <Arguments>
    None
  
  <Exceptions>
    None

  <Side Effects>
    May re-write all resource files and stop all vessels

  <Returns>
    None
  """
  # Make a log of this
  servicelogger.log("[ERROR]:A Repy vessel has exited with ThreadErr status. Checking to determine next step")

  # Get all the names of the vessels
  vesselnamelist = nmAPI.vesseldict.keys()
  
  # read in all of the resource files so that we can look at and possibly 
  # manipulate them.
  resourcedicts = {}
  for vesselname in vesselnamelist:
    resourcedicts[vesselname] = resourcemanipulation.read_resourcedict_from_file('resource.'+vesselname)
  
  # Get the number of threads Repy has allocated
  allowedthreadcount = 0
  for vesselname in vesselnamelist:
    allowedthreadcount = allowedthreadcount + resourcedicts[vesselname]['events']
  
  # Get the total number os system threads currently used 
  totalusedthreads = nonportable.os_api.get_system_thread_count()
  
  # Log this information
  servicelogger.log("[WARNING]:System Threads: "+str(totalusedthreads)+"  Repy Allocated Threads: "+str(allowedthreadcount))
  
  # Get the NM configuration
  configuration = persist.restore_object("nodeman.cfg")
  
  # Check if there is a threshold configuration,
  # otherwise add the default configuration
  if NOOP_CONFIG_KEY in configuration:
    threshold = configuration[NOOP_CONFIG_KEY]
  else:
    threshold = DEFAULT_NOOP_THRESHOLD
    configuration[NOOP_CONFIG_KEY] = threshold
    persist.commit_object(configuration, "nodeman.cfg")
  
  # Check if we are below the threshold, if so
  # then just return, this is a noop
  if allowedthreadcount < totalusedthreads * threshold:
    return
  
  servicelogger.log("[ERROR]:Reducing number of system threads!")



  #### We are above the threshold!   Let's cut everything by 1/2

  # First, update the resource files
  for vesselname in vesselnamelist:
    # cut the events by 1/2
    resourcedicts[vesselname]['events'] = resourcedicts[vesselname]['events'] / 2
    # write out the new resource files...
    resourcemanipulation.write_resourcedict_to_file(resourcedicts[vesselname], 'resource.'+vesselname)
  

  
  
  # Create the stop tuple, exit code 57 with an error message
  stoptuple = (57, "Fatal system-wide threading error! Stopping all vessels.")
  
  # Stop each vessel
  for vesselname in vesselnamelist:
    try:
      # Stop each vessel, using our stoptuple
      nmAPI.stopvessel(vesselname,stoptuple)
    except Exception, exp:
      # Forge on, regardless of errors
      servicelogger.log("[ERROR]:Failed to reset vessel (Handling ThreadErr). Exception: "+str(exp))
      servicelogger.log_last_exception()
コード例 #23
0
            'Existing resources are so small they cannot be split.\n' + str(e))

    # now let's remove the proposed bits!
    try:
        finalresourcedict = resourcemanipulation.subtract_resourcedicts(
            intermediateresourcedict, proposedresourcedict)
    except resourcemanipulation.ResourceMathError, e:
        raise BadRequest('Proposed vessel is too large.\n' + str(e))

    # newname1 becomes the leftovers...
    _setup_vessel(newname1, vesselname, finalresourcedict, call_list_vessel)
    # newname2 is what the user requested
    _setup_vessel(newname2, vesselname, proposedresourcedict, call_list_vessel)
    _destroy_vessel(vesselname)

    persist.commit_object(vesseldict, "vesseldict")
    return newname1 + " " + newname2 + "\nSuccess"


def joinvessels(vesselname1, vesselname2):
    if vesselname1 not in vesseldict:
        raise BadRequest, "No such vessel '" + vesselname1 + "'"
    if vesselname2 not in vesseldict:
        raise BadRequest, "No such vessel '" + vesselname2 + "'"

    if vesseldict[vesselname1]['ownerkey'] != vesseldict[vesselname2][
            'ownerkey']:
        raise BadRequest("Vessels must have the same owner")

    if vesseldict[vesselname1]['status'] == 'Started' or vesseldict[
            vesselname2]['status'] == 'Started':
コード例 #24
0
# Run nminit.py once to build the initial node manager state (vessels,
# nodeman.cfg, vesseldict) before launching the node manager.
initproc = subprocess.Popen([sys.executable, "nminit.py"])
initproc.wait()

# nminit doesn't automatically initialize the vessels with security layer files,
# so we should do this ourselves
for vesseldirectoryname in glob.glob('v[0-9]*'):
  if os.path.isdir(vesseldirectoryname):
    shutil.rmtree(vesseldirectoryname)
    shutil.copytree("securitylayers", vesseldirectoryname)

# add security layer configuration to nodeman.cfg
configuration = persist.restore_object("nodeman.cfg")
configuration['repy_prepend'] = ["private_encasementlib.repy", "private_hideprivate_layer.repy"]
configuration['repy_prepend_dir'] = "securitylayers"
persist.commit_object(configuration, "nodeman.cfg")

# Launch the node manager itself in test mode; we hold the Popen handle so
# the process can later be cleaned up.
nmproc = subprocess.Popen([sys.executable, "nmmain.py", "--test-mode"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

# without closing stdin, stdout, stderr, nmmain.py won't execute on XP
nmproc.stdin.close()
nmproc.stdout.close()
nmproc.stderr.close()

try:
  # We'll want to kill this once we are told to do so... (sys.stdin.close() is
  # the signal)
  sys.stdin.read()
except KeyboardInterrupt:
  # the user interrupted us, let's clean up!
  print
コード例 #25
0
def uninstall_Linux_and_Mac():
    """
    Remove the seattle entry from the crontab and stop every running
    seattle process (via stop_all_seattle_processes).

    Raises SeattleNotInstalledError when seattle was never installed (no
    crontab entry found), and may propagate UnsupportedOSError from a
    child function on unsupported platforms.

    Returns True when the uninstall succeeded, False otherwise.
    """
    machine = platform.machine()

    # Derek Cheng: Nokia N800/900 tablets have no crontab, so hand off to
    # the tablet-specific uninstaller.
    if machine.startswith('armv'):
        return uninstall_nokia()

    # Nothing to remove from a crontab on mips devices.
    if machine.startswith('mips'):
        return True

    # Read the current crontab so our entry can be filtered out of it.
    current_crontab = subprocess.Popen(["crontab", "-l"],
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE).stdout

    # Copy every crontab line except the seattle starter entry into a
    # temp file.
    filtered_file = tempfile.NamedTemporaryFile()
    starter_name = seattleinstaller.get_starter_file_name()
    found_seattle_entry = False
    for crontab_line in current_crontab:
        if starter_name in crontab_line:
            found_seattle_entry = True
        else:
            filtered_file.write(crontab_line)

    if not found_seattle_entry:
        filtered_file.close()
        raise SeattleNotInstalledError("Seattle cannot be uninstalled because it is not currently installed.")

    # Install the filtered crontab in place of the old one.  flush() is
    # needed so the crontab subprocess sees the complete file.
    filtered_file.flush()
    install_proc = subprocess.Popen(["crontab", filtered_file.name],
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
    install_proc.wait()
    filtered_file.close()

    # Re-read the crontab to confirm our entry is really gone.
    new_crontab_stdout, new_crontab_stderr = subprocess.Popen(
        ["crontab", "-l"],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE).communicate()

    if starter_name in new_crontab_stdout:
        return False

    # Kill any seattle processes that are still alive.
    stop_all_seattle_processes.main()

    # Record in nodeman.cfg that the 2009-installer crontab entry is gone.
    configuration = persist.restore_object("nodeman.cfg")
    configuration['crontab_updated_for_2009_installer'] = False
    persist.commit_object(configuration, "nodeman.cfg")

    return True
コード例 #26
0
def handle_threading_error():
    """
  <Purpose>
    Handles a repy node failing with ThreadErr. If repy is allowed to use
    more than 10% of the current threads, reduce the global thread count by 50%
    and stop all existing vessels

  <Arguments>
    None
  
  <Exceptions>
    None

  <Side Effects>
    May re-write all resource files and stop all vessels

  <Returns>
    None
  """
    # Make a log of this
    servicelogger.log(
        "[ERROR]:A Repy vessel has exited with ThreadErr status. Checking to determine next step"
    )

    # Get all the names of the vessels
    vesselnamelist = nmAPI.vesseldict.keys()

    # read in all of the resource files so that we can look at and possibly
    # manipulate them.
    resourcedicts = {}
    for vesselname in vesselnamelist:
        resourcedicts[
            vesselname] = resourcemanipulation.read_resourcedict_from_file(
                'resource.' + vesselname)

    # Get the number of threads Repy has allocated
    allowedthreadcount = 0
    for vesselname in vesselnamelist:
        allowedthreadcount = allowedthreadcount + resourcedicts[vesselname][
            'events']

    # Get the total number os system threads currently used
    totalusedthreads = nonportable.os_api.get_system_thread_count()

    # Log this information
    servicelogger.log("[WARNING]:System Threads: " + str(totalusedthreads) +
                      "  Repy Allocated Threads: " + str(allowedthreadcount))

    # Get the NM configuration
    configuration = persist.restore_object("nodeman.cfg")

    # Check if there is a threshold configuration,
    # otherwise add the default configuration
    if NOOP_CONFIG_KEY in configuration:
        threshold = configuration[NOOP_CONFIG_KEY]
    else:
        threshold = DEFAULT_NOOP_THRESHOLD
        configuration[NOOP_CONFIG_KEY] = threshold
        persist.commit_object(configuration, "nodeman.cfg")

    # Check if we are below the threshold, if so
    # then just return, this is a noop
    if allowedthreadcount < totalusedthreads * threshold:
        return

    servicelogger.log("[ERROR]:Reducing number of system threads!")

    #### We are above the threshold!   Let's cut everything by 1/2

    # First, update the resource files
    for vesselname in vesselnamelist:
        # cut the events by 1/2
        resourcedicts[vesselname][
            'events'] = resourcedicts[vesselname]['events'] / 2
        # write out the new resource files...
        resourcemanipulation.write_resourcedict_to_file(
            resourcedicts[vesselname], 'resource.' + vesselname)

    # Create the stop tuple, exit code 57 with an error message
    stoptuple = (57,
                 "Fatal system-wide threading error! Stopping all vessels.")

    # Stop each vessel
    for vesselname in vesselnamelist:
        try:
            # Stop each vessel, using our stoptuple
            nmAPI.stopvessel(vesselname, stoptuple)
        except Exception, exp:
            # Forge on, regardless of errors
            servicelogger.log(
                "[ERROR]:Failed to reset vessel (Handling ThreadErr). Exception: "
                + str(exp))
            servicelogger.log_last_exception()
コード例 #27
0
ファイル: nmthreadingerror.py プロジェクト: Ashmita89/attic
def handle_threading_error(nmAPI):
    """
  <Purpose>
    Handles a repy node failing with ThreadErr. Reduces global thread count by 50%.
    Restarts all existing vesselts

  <Arguments>
    nmAPI: the nmAPI module -- passed to the function to avoid import loops;
           see ticket #590 for more information about this.
  """
    # Make a log of this
    servicelogger.log(
        "[ERROR]:A Repy vessel has exited with ThreadErr status. Patching restrictions and reseting all vessels."
    )

    # Get the number of threads Repy has allocated
    allocatedThreads = get_allocated_threads()

    # Get the number os system threads currently
    systemThreads = nonportable.os_api.get_system_thread_count()

    # Log this information
    servicelogger.log(
        "[ERROR]:System Threads: " + str(systemThreads) + "  Repy Allocated Threads: " + str(allocatedThreads)
    )

    # Get the NM configuration
    configuration = persist.restore_object("nodeman.cfg")

    # Check if there is a threshold configuration,
    # otherwise add the default configuration
    if NOOP_CONFIG_KEY in configuration:
        threshold = configuration[NOOP_CONFIG_KEY]
    else:
        threshold = DEFAULT_NOOP_THRESHOLD
        configuration[NOOP_CONFIG_KEY] = threshold
        persist.commit_object(configuration, "nodeman.cfg")

    # Check if we are below the threshold, if so
    # then just return, this is a noop
    if allocatedThreads < systemThreads * threshold:
        return

    # We are continuing, so we are above the threshold!
    # First, update the restrictions
    update_restrictions()

    # Then, stop the vessels
    # Get all the vessels
    vessels = nmAPI.vesseldict.keys()

    # Create the stop tuple, exit code 57 with an error message
    stoptuple = (57, "Fatal system-wide threading error! Stopping all vessels.")

    # Stop each vessel
    for vessel in vessels:
        try:
            # Stop each vessel, using our stoptuple
            nmAPI.stopvessel(vessel, stoptuple)
        except Exception, exp:
            # Forge on, regardless of errors
            servicelogger.log("[ERROR]:Failed to reset vessel (Handling ThreadErr). Exception: " + str(exp))
            servicelogger.log_last_exception()
コード例 #28
0
def main(vesselcreationlist, tenpercentdict, targetdirectory):
  """
  <Purpose>
    To take the vesselcreationlist (list containing owner and user info)
    and generate the initial resource files and directories.

    Working Definition:
      percentage - in this method we are dealing with how to split up the
      donated resources (this could be 10% of the system's available
      resources, or it could be a user defined percentage, but regardless
      we only consider the portion donated). So any time we refer to
      percentages in this method we are talking about the percentage of
      the donated resources. This applies to the tenpercentdict that is
      passed as an argument (it is only ten percent of the donated
      resources).

    This method does not special case the seattle vessel, it only creates
    vessels based off what is in the vesselcreationlist list (the original
    writecustominstaller handled the seattle vessel separately).

  <Arguments>
    vesselcreationlist: a list that has the following format:
    [(vessel percent, ownerpubkeyfilename, list of user pubkeyfilenames),
      (vessel percent, ownerpubkeyfilename, list of user pubkeyfilenames),...]
      It contains the information for the initial vessels of the node.

    tenpercentdict: a resource dictionary describing one tenth of the
      donated resources; copies of it are summed to build each vessel's
      resource file.

    targetdirectory: a string containing the directory where the
      restrictions files and vesseldict will be placed.

  <Exceptions>
    ValueError: if the vessel percent (from element 0 of a tuple in the
      vesselcreationlist list) is not 10,20,30, ... , 80, 90, 100
      A ValueError will also be raised if the total percent (of all
      the vessel percents in the vesselcreationlist list) does not add
      up to the integer 100

    IOError: if vessel.restrictions is not found, this must be included
      ahead of time in the target directory.

    OSError, WindowsError: if os.mkdir is unable to create the vessel
      directories.

  <Side Effects>
    Creates the vesseldict using persist.commit_object.
    Creates resource files for each vessel with names of the form
      resource.v1, resource.v2, ...
    Creates a directory for each vessel with a name of the form v1, v2, ...

  <Return>
    None

  <Notes>
    The vesselcreationlist is a list that has the following format:
    [(vessel percent, ownerpubkeyfilename, list of user pubkeyfilenames),
     (vessel percent, ownerpubkeyfilename, list of user pubkeyfilenames), ...]

    So it might look like:
      [(10, 'joe.publickey', []),
       (50, 'jim.publickey', ['alice.publickey', 'bob.publickey']),
       (40, 'alice.publickey', []) ]

    This would indicate v1 is owned by joe and gets 10 percent, v2 is owned
    by jim has users alice and bob and gets 50 percent, v3 is owned by
    alice and gets 40 percent.   I'll track the seattle information and
    will write it independently.

    The structure of this method can be described in 3 distinct sections:
    1) The vesseldict is constructed from the information in
       vesselcreationlist; it contains the public keys and other vessel
       information.
    2) The resource files are written to files for all the vessels.
    3) Directories are created for all the vessels.
  """

  # Check to ensure that the vessel information is in the correct format.
  # Each vessel percent must be an integer multiple of ten from (0,100]
  # and the total must be 100. This is checked here instead of when the
  # list is originally constructed from the vesselinfo file so that only
  # this method must be changed if the granularity of the possible vessel
  # percentages is changed.  
  total = 0
  
  for item in vesselcreationlist:
    if item[0] not in range(10, 101, 10):
      raise ValueError, "Invalid vessel percent '"+str(item[0])+"'"
    total = total + item[0]

  if total != 100:
    raise ValueError, "Total of vessel percentage is '"+str(total)+"', not 100."


 
  vesseldict = {}
  vesselnumber = 1
  # time to work on the vessel dictionary...
  for item in vesselcreationlist:

    vesselname = 'v'+str(vesselnumber)

    # item is (percent, ownerkeyfilename, userkey filename list); the
    # percent is not stored here -- it is consumed below when sizing the
    # resource files.
    vesseldict['v'+str(vesselnumber)] = {'userkeys':item[2], 'ownerkey':item[1], 'oldmetadata':None, 'stopfilename':vesselname+'.stop', 'logfilename':vesselname+'.log', 'statusfilename':vesselname+'.status', 'resourcefilename':'resource.'+vesselname, 'advertise':True, 'ownerinformation':'', 'status':'Fresh'}
    vesselnumber = vesselnumber + 1
    
  persist.commit_object(vesseldict,targetdirectory+"/vesseldict")


  # I'm going to do the resources / restrictions now...
  onetenth = tenpercentdict.copy()
  
  # These are the restrictions that apply to all vessels, they are
  # not a resource we measure.
  restrictionsfo = file('vessel.restrictions')
  restrictionsstring = restrictionsfo.read()
  restrictionsfo.close()

  # I'll use this to figure out which ports to assign
  usedpercent = 0
  vesselnumber = 1

  for item in vesselcreationlist:
    
    # the percentcount variable is slightly confusing; up until now we have
    # talked about vessels as having 10 or 20 or ... percent of the donated
    # resources.  We restrict vessels to being a multiple of ten so that we
    # do not have to cut down the donated resources too far (this is an
    # attempt to keep things simple and avoid getting resources at zero).
    # percentcount should be an integer between 0 and 10
    percentcount = item[0] / 10
    # make a resource file of the right size...
    size = percentcount
    thisresourcedata = tenpercentdict.copy()
    # add onetenth (size - 1) more times, giving size tenths in total.
    while size > 1:
      thisresourcedata = nmresourcemath.add(thisresourcedata, onetenth)
      size = size - 1

    # I need the ports...
    startpercent = usedpercent
    endpercent = usedpercent + percentcount
    # a yucky way of getting the ports.   Should do 63100-63109 for the first,
    # 63110-63119 for the second, etc.
    thisresourcedata['messport'] = set(range(63100+10*startpercent, 63100+10*endpercent))
    thisresourcedata['connport'] = set(range(63100+10*startpercent, 63100+10*endpercent))
    
    
    nmresourcemath.write_resource_dict(thisresourcedata, targetdirectory+"/resource.v"+str(vesselnumber))
        
    # append the restrictions data.
    restrictionsfo = file(targetdirectory+'/resource.v'+str(vesselnumber),"a")
    restrictionsfo.write(restrictionsstring)
    restrictionsfo.close()

    # increment the vesselnumber and used percent
    vesselnumber = vesselnumber + 1
    usedpercent = usedpercent + percentcount


  # Get the directory, if any, that is used for security layers.
  configuration = persist.restore_object("nodeman.cfg")
  repy_prepend_dir = None
  if 'repy_prepend_dir' in configuration:
    repy_prepend_dir = configuration['repy_prepend_dir']

  # Get the list of files in the security layer directory
  # NOTE(review): repy_prepend_files is built here but never used in this
  # function as shown -- confirm whether later code relies on it.
  repy_prepend_files = []
  if repy_prepend_dir is not None:
    repy_prepend_files = os.listdir(repy_prepend_dir)

  # make the directories...
  for num in range(len(vesselcreationlist)):
    vesselname = 'v'+str(num+1)
    # Referencing WindowsError raises NameError on non-Windows platforms;
    # this probe detects which set of exceptions os.mkdir may raise.
    try:
      WindowsError

    except NameError: # not on windows...
      # make the vessel dirs...
      try:
        os.mkdir(targetdirectory+"/"+vesselname)
      except OSError,e:
        if e[0] == 17:
          # directory exists (errno 17 == EEXIST)
          pass
        else:
          raise

    else: # on Windows...

      # make the vessel dirs...
      try:
        os.mkdir(targetdirectory+"/"+vesselname)
      except (OSError,WindowsError),e:
        if e[0] == 17 or e[0] == 183:
          # directory exists (EEXIST or Windows ERROR_ALREADY_EXISTS)
          pass
        else:
          raise
コード例 #29
0
    if OS == 'Linux' or OS == 'Darwin':
      successful_uninstall = uninstall_Linux_and_Mac()
    else:
      successful_uninstall = uninstall_Windows()
  except SeattleNotInstalledError,s:
    _output("Seattle was not detected as having been installed.")
    return



  # Print final output, and do final logging.
  if successful_uninstall:
    # Modify nodeman.cfg to note that seattle is no longer installed.
    configuration = persist.restore_object("nodeman.cfg")
    configuration['seattle_installed'] = False
    persist.commit_object(configuration,"nodeman.cfg")

    _output("Seattle has been successfully uninstalled.  It is now safe to " \
              + "remove all Seattle files and directories from your system.")
    servicelogger.log(time.strftime(" seattle completed uninstall on: " \
                                      + "%m-%d-%Y %H:%M:%S"))
  else:
    _output("Seattle could not be uninstalled for unknown reasons. Please " \
              + "contact the Seattle development team for assistance.")




if __name__ == "__main__":
  main()
コード例 #30
0
def uninstall_Linux_and_Mac():
  """
  <Purpose>
    Remove the seattle entry from the crontab, and kill all seattle processes
    by using stop_all_seattle_processes.py
  <Arguments>
    None.
  <Exceptions>
    SeattleNotInstalledError if seattle had not been initially installed when
      the uninstaller was run.
    UnsupportedOSError from a child function if the OS running this script is
      not supported.
  <Side Effects>
    Removes the seattle entry from the crontab and stops seattle from running.
  <Returns>
    True if succeeded in uninstalling,
    False otherwise.
  """


  # Derek Cheng: Find out if this is a Nokia N800/900 Tablet, and if so runs a 
  # separate uninstaller because there is no crontab on the tablets.
  if platform.machine().startswith('armv'):
    return uninstall_nokia()

  # Nothing to remove from a crontab on mips devices.
  if platform.machine().startswith('mips'):
    return True


  # Find out if Seattle is installed (currently in the crontab), and remove if
  # so.
  crontab_contents_stdout = subprocess.Popen(["crontab","-l"],
                                             stdout=subprocess.PIPE,
                                             stderr=subprocess.PIPE).stdout

  # Generate a temp file with the user's crontab minus our task.
  temp_crontab_file = tempfile.NamedTemporaryFile()

  # Copy every crontab line except the seattle starter entry.
  seattle_crontab_entry_found = False
  for line in crontab_contents_stdout:
    if not seattleinstaller.get_starter_file_name() in line:
      temp_crontab_file.write(line)
    else:
      seattle_crontab_entry_found = True


  if not seattle_crontab_entry_found:
    temp_crontab_file.close()
    raise SeattleNotInstalledError("Seattle cannot be uninstalled because it " \
                                     + "is not currently installed.")



  # Replace the old crontab with the updated crontab contents.
  # flush() is required so the crontab subprocess sees the full file.
  temp_crontab_file.flush()
  replace_crontab = subprocess.Popen(["crontab",temp_crontab_file.name],
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
  replace_crontab.wait()
  temp_crontab_file.close()



  # Confirm that seattle was successfully removed from the crontab, and set
  # the 'crontab_updated_for_2009_installer' value in nodeman.cfg to False.
  modified_crontab_contents_stdout,modified_crontab_contents_stderr = \
      subprocess.Popen(["crontab","-l"],
                       stdout=subprocess.PIPE,
                       stderr=subprocess.PIPE).communicate()

  if seattleinstaller.get_starter_file_name() \
        in modified_crontab_contents_stdout:
    return False
  else:
    # Stop all instances of seattle from running before returning.
    stop_all_seattle_processes.main()

    configuration = persist.restore_object("nodeman.cfg")
    configuration['crontab_updated_for_2009_installer'] = False
    persist.commit_object(configuration,"nodeman.cfg")

    return True
コード例 #31
0
ファイル: nmmain.py プロジェクト: fagan2888/seattle-package
def main():
    global configuration

    if not FOREGROUND:
        # Background ourselves.
        daemon.daemonize()

    # Check if we are running in testmode.
    if TEST_NM:
        nodemanager_pid = os.getpid()
        servicelogger.log(
            "[INFO]: Running nodemanager in test mode on port 1224, " +
            "pid %s." % str(nodemanager_pid))
        nodeman_pid_file = open(os.path.join(os.getcwd(), 'nodemanager.pid'),
                                'w')

        # Write out the pid of the nodemanager process that we started to a file.
        # This is only done if the nodemanager was started in test mode.
        try:
            nodeman_pid_file.write(str(nodemanager_pid))
        finally:
            nodeman_pid_file.close()

    else:
        # ensure that only one instance is running at a time...
        gotlock = runonce.getprocesslock("seattlenodemanager")

        if gotlock == True:
            # I got the lock.   All is well...
            pass
        else:
            if gotlock:
                servicelogger.log(
                    "[ERROR]:Another node manager process (pid: " +
                    str(gotlock) + ") is running")
            else:
                servicelogger.log(
                    "[ERROR]:Another node manager process is running")
            return

    servicelogger.log('[INFO]: This is Seattle release "' + version + "'")

    # Feature add for #1031: Log information about the system in the nm log...
    servicelogger.log('[INFO]:platform.python_version(): "' +
                      str(platform.python_version()) + '"')
    servicelogger.log('[INFO]:platform.platform(): "' +
                      str(platform.platform()) + '"')

    # uname on Android only yields 'Linux', let's be more specific.
    try:
        import android
        servicelogger.log('[INFO]:platform.uname(): Android / "' +
                          str(platform.uname()) + '"')
    except ImportError:
        servicelogger.log('[INFO]:platform.uname(): "' +
                          str(platform.uname()) + '"')

    # I'll grab the necessary information first...
    servicelogger.log("[INFO]:Loading config")
    # BUG: Do this better?   Is this the right way to engineer this?
    configuration = persist.restore_object("nodeman.cfg")

    # Armon: initialize the network restrictions
    initialize_ip_interface_restrictions(configuration)

    # ZACK BOKA: For Linux and Darwin systems, check to make sure that the new
    #            seattle crontab entry has been installed in the crontab.
    #            Do this here because the "nodeman.cfg" needs to have been read
    #            into configuration via the persist module.
    if nonportable.ostype == 'Linux' or nonportable.ostype == 'Darwin':
        if 'crontab_updated_for_2009_installer' not in configuration or \
              configuration['crontab_updated_for_2009_installer'] == False:
            try:
                # crontab may not exist on Android, therefore let's not check
                # if we are running on Android. See #1302 and #1254.
                try:
                    import android
                except ImportError:
                    import update_crontab_entry
                    modified_crontab_entry = \
                        update_crontab_entry.modify_seattle_crontab_entry()
                    # If updating the seattle crontab entry succeeded, then update the
                    # 'crontab_updated_for_2009_installer' so the nodemanager no longer
                    # tries to update the crontab entry when it starts up.
                    if modified_crontab_entry:
                        configuration[
                            'crontab_updated_for_2009_installer'] = True
                        persist.commit_object(configuration, "nodeman.cfg")

            except Exception, e:
                exception_traceback_string = traceback.format_exc()
                servicelogger.log("[ERROR]: The following error occured when " \
                                    + "modifying the crontab for the new 2009 " \
                                    + "seattle crontab entry: " \
                                    + exception_traceback_string)
コード例 #32
0
def main():
  global configuration

  if not FOREGROUND:
    # Background ourselves.
    daemon.daemonize()


  # Check if we are running in testmode.
  if TEST_NM:
    nodemanager_pid = os.getpid()
    servicelogger.log("[INFO]: Running nodemanager in test mode on port <nodemanager_port>, "+
                      "pid %s." % str(nodemanager_pid))
    nodeman_pid_file = open(os.path.join(os.getcwd(), 'nodemanager.pid'), 'w')
    
    # Write out the pid of the nodemanager process that we started to a file.
    # This is only done if the nodemanager was started in test mode.
    try:
      nodeman_pid_file.write(str(nodemanager_pid))
    finally:
      nodeman_pid_file.close()

  else:
    # ensure that only one instance is running at a time...
    gotlock = runonce.getprocesslock("seattlenodemanager")

    if gotlock == True:
      # I got the lock.   All is well...
      pass
    else:
      if gotlock:
        servicelogger.log("[ERROR]:Another node manager process (pid: " + str(gotlock) + 
                        ") is running")
      else:
        servicelogger.log("[ERROR]:Another node manager process is running")
      return



  # Feature add for #1031: Log information about the system in the nm log...
  servicelogger.log('[INFO]:platform.python_version(): "' + 
    str(platform.python_version())+'"')
  servicelogger.log('[INFO]:platform.platform(): "' + 
    str(platform.platform())+'"')

  # uname on Android only yields 'Linux', let's be more specific.
  try:
    import android
    servicelogger.log('[INFO]:platform.uname(): Android / "' + 
      str(platform.uname())+'"')
  except ImportError:
    servicelogger.log('[INFO]:platform.uname(): "'+str(platform.uname())+'"')

  # I'll grab the necessary information first...
  servicelogger.log("[INFO]:Loading config")
  # BUG: Do this better?   Is this the right way to engineer this?
  configuration = persist.restore_object("nodeman.cfg")
  
  
  # Armon: initialize the network restrictions
  initialize_ip_interface_restrictions(configuration)
  
  
  
  # ZACK BOKA: For Linux and Darwin systems, check to make sure that the new
  #            seattle crontab entry has been installed in the crontab.
  #            Do this here because the "nodeman.cfg" needs to have been read
  #            into configuration via the persist module.
  if nonportable.ostype == 'Linux' or nonportable.ostype == 'Darwin':
    if 'crontab_updated_for_2009_installer' not in configuration or \
          configuration['crontab_updated_for_2009_installer'] == False:
      try:
        # crontab may not exist on Android, therefore let's not check
        # if we are running on Android. See #1302 and #1254.
        try:
          import android
        except ImportError:
          import update_crontab_entry
          modified_crontab_entry = \
              update_crontab_entry.modify_seattle_crontab_entry()
          # If updating the seattle crontab entry succeeded, then update the
          # 'crontab_updated_for_2009_installer' so the nodemanager no longer
          # tries to update the crontab entry when it starts up.
          if modified_crontab_entry:
            configuration['crontab_updated_for_2009_installer'] = True
            persist.commit_object(configuration,"nodeman.cfg")

      except Exception,e:
        exception_traceback_string = traceback.format_exc()
        servicelogger.log("[ERROR]: The following error occured when " \
                            + "modifying the crontab for the new 2009 " \
                            + "seattle crontab entry: " \
                            + exception_traceback_string)
コード例 #33
0
def handle_threading_error(nmAPI):
    """
  <Purpose>
    Handles a repy node failing with ThreadErr.  Logs the event, makes
    sure a no-op threshold exists in nodeman.cfg, and -- when Repy's
    allocated thread count is at or above that threshold relative to the
    system thread count -- updates the restrictions and stops every
    vessel with exit code 57.

  <Arguments>
    nmAPI: the nmAPI module -- passed to the function to avoid import loops;
           see ticket #590 for more information about this.

  <Side Effects>
    Writes to the service log; may add the default threshold to
    nodeman.cfg; may stop all vessels.

  <Returns>
    None.
  """
    # Make a log of this
    servicelogger.log(
        "[ERROR]:A Repy vessel has exited with ThreadErr status. Patching restrictions and reseting all vessels."
    )

    # Get the number of threads Repy has allocated
    # (get_allocated_threads is defined elsewhere in this module.)
    allocatedThreads = get_allocated_threads()

    # Get the number os system threads currently
    systemThreads = nonportable.os_api.get_system_thread_count()

    # Log this information
    servicelogger.log("[ERROR]:System Threads: " + str(systemThreads) +
                      "  Repy Allocated Threads: " + str(allocatedThreads))

    # Get the NM configuration
    configuration = persist.restore_object("nodeman.cfg")

    # Check if there is a threshold configuration,
    # otherwise add the default configuration.
    # (NOOP_CONFIG_KEY and DEFAULT_NOOP_THRESHOLD are module-level
    # constants defined elsewhere in this file.)
    if NOOP_CONFIG_KEY in configuration:
        threshold = configuration[NOOP_CONFIG_KEY]
    else:
        threshold = DEFAULT_NOOP_THRESHOLD
        configuration[NOOP_CONFIG_KEY] = threshold
        persist.commit_object(configuration, "nodeman.cfg")

    # Check if we are below the threshold, if so
    # then just return, this is a noop.
    # (threshold is presumably a fraction of the system thread count --
    # TODO confirm against DEFAULT_NOOP_THRESHOLD's definition.)
    if allocatedThreads < systemThreads * threshold:
        return

    # We are continuing, so we are above the threshold!
    # First, update the restrictions
    # (update_restrictions is defined elsewhere in this module.)
    update_restrictions()

    # Then, stop the vessels
    # Get all the vessels
    vessels = nmAPI.vesseldict.keys()

    # Create the stop tuple, exit code 57 with an error message
    stoptuple = (57,
                 "Fatal system-wide threading error! Stopping all vessels.")

    # Stop each vessel
    for vessel in vessels:
        try:
            # Stop each vessel, using our stoptuple
            nmAPI.stopvessel(vessel, stoptuple)
        except Exception, exp:
            # Forge on, regardless of errors, so one bad vessel does not
            # prevent the remaining vessels from being stopped.
            servicelogger.log(
                "[ERROR]:Failed to reset vessel (Handling ThreadErr). Exception: "
                + str(exp))
            servicelogger.log_last_exception()