def joinvessels(vesselname1, vesselname2):
  """
  <Purpose>
    Joins two stopped vessels that belong to the same owner into a single
    new vessel whose resources are the sum of both plus the offcut.

  <Arguments>
    vesselname1, vesselname2: the names of the vessels to join.

  <Exceptions>
    BadRequest if either vessel does not exist, the owners differ, or
    either vessel is currently 'Started'.

  <Side Effects>
    Destroys both original vessels, creates the combined vessel, and
    commits the updated vesseldict to disk.

  <Returns>
    The new vessel's name, a newline, and "Success".
  """
  # NOTE: raise-with-call-form is used throughout for consistency with the
  # BadRequest(...) raises below (the comma form is Python-2-only syntax).
  if vesselname1 not in vesseldict:
    raise BadRequest("No such vessel '" + vesselname1 + "'")
  if vesselname2 not in vesseldict:
    raise BadRequest("No such vessel '" + vesselname2 + "'")

  if vesseldict[vesselname1]['ownerkey'] != vesseldict[vesselname2]['ownerkey']:
    raise BadRequest("Vessels must have the same owner")

  if vesseldict[vesselname1]['status'] == 'Started' or vesseldict[vesselname2]['status'] == 'Started':
    raise BadRequest("Attempting to join a running vessel")

  # get the new name
  newname = get_new_vessel_name()

  # Read the resource allocations of both vessels and the offcut.
  currentresourcedict1, call_list_v1 = resourcemanipulation.read_resourcedict_from_file(
      vesseldict[vesselname1]['resourcefilename'])
  currentresourcedict2, call_list_v2 = resourcemanipulation.read_resourcedict_from_file(
      vesseldict[vesselname2]['resourcefilename'])
  offcutresourcedict, offcut_call_list = resourcemanipulation.read_resourcedict_from_file(
      offcutfilename)

  # the final resources are reconstructed from the offcut + vessel1 + vessel2
  intermediateresourcedict = resourcemanipulation.add_resourcedicts(
      currentresourcedict1, currentresourcedict2)
  finalresourcedict = resourcemanipulation.add_resourcedicts(
      intermediateresourcedict, offcutresourcedict)

  # MMM: We add in the call list of resources from one of the original
  # resource file. Since this is for backward compatibility for Repy V1,
  # it does not matter which call list we add in to the final resource file.
  _setup_vessel(newname, vesselname1, finalresourcedict, call_list_v1)
  _destroy_vessel(vesselname1)
  _destroy_vessel(vesselname2)

  # Persist the updated vessel table so the change survives a restart.
  persist.commit_object(vesseldict, "vesseldict")
  return newname + "\nSuccess"
def joinvessels(vesselname1, vesselname2):
  """
  <Purpose>
    Joins two stopped vessels that belong to the same owner into a single
    new vessel whose resources are the sum of both plus the offcut.

  <Arguments>
    vesselname1, vesselname2: the names of the vessels to join.

  <Exceptions>
    BadRequest if either vessel does not exist, the owners differ, or
    either vessel is currently 'Started'.

  <Side Effects>
    Destroys both original vessels, creates the combined vessel, and
    commits the updated vesseldict to disk.

  <Returns>
    The new vessel's name, a newline, and "Success".
  """
  # NOTE: raise-with-call-form is used throughout for consistency with the
  # BadRequest(...) raises below (the comma form is Python-2-only syntax).
  if vesselname1 not in vesseldict:
    raise BadRequest("No such vessel '" + vesselname1 + "'")
  if vesselname2 not in vesseldict:
    raise BadRequest("No such vessel '" + vesselname2 + "'")

  if vesseldict[vesselname1]['ownerkey'] != vesseldict[vesselname2]['ownerkey']:
    raise BadRequest("Vessels must have the same owner")

  if vesseldict[vesselname1]['status'] == 'Started' or vesseldict[vesselname2]['status'] == 'Started':
    raise BadRequest("Attempting to join a running vessel")

  # get the new name
  newname = get_new_vessel_name()

  # Read the resource allocations of both vessels and the offcut.
  currentresourcedict1 = resourcemanipulation.read_resourcedict_from_file(
      vesseldict[vesselname1]['resourcefilename'])
  currentresourcedict2 = resourcemanipulation.read_resourcedict_from_file(
      vesseldict[vesselname2]['resourcefilename'])
  offcutresourcedict = resourcemanipulation.read_resourcedict_from_file(
      offcutfilename)

  # the final resources are reconstructed from the offcut + vessel1 + vessel2
  intermediateresourcedict = resourcemanipulation.add_resourcedicts(
      currentresourcedict1, currentresourcedict2)
  finalresourcedict = resourcemanipulation.add_resourcedicts(
      intermediateresourcedict, offcutresourcedict)

  _setup_vessel(newname, vesselname1, finalresourcedict)
  _destroy_vessel(vesselname1)
  _destroy_vessel(vesselname2)

  # Persist the updated vessel table so the change survives a restart.
  persist.commit_object(vesseldict, "vesseldict")
  return newname + "\nSuccess"
def start_resource_nanny(resourcefilename):
  """
  <Purpose>
    Initializes the resource information the nanny needs to do monitoring.

  <Arguments>
    resourcefilename: the file that contains the set of resources we will use.

  <Exceptions>
    ResourceParseError if the resource file is invalid

  <Side Effects>
    None

  <Returns>
    None.
  """
  global _resources_allowed_dict
  global _resources_consumed_dict

  # Load the allowed resource limits from disk.  The accompanying call list
  # is not needed for monitoring, so it is discarded here.
  _resources_allowed_dict, _discarded_call_list = \
      resourcemanipulation.read_resourcedict_from_file(resourcefilename)

  # Build the bookkeeping structure (with the correct locks, etc.) that
  # tracks how much of each resource has been consumed.
  _resources_consumed_dict = _create_resource_consumption_dict()
def start_resource_nanny(resourcefilename):
  """
  <Purpose>
    Initializes the resource information the nanny needs to do monitoring.

  <Arguments>
    resourcefilename: the file that contains the set of resources we will use.

  <Exceptions>
    ResourceParseError if the resource file is invalid

  <Side Effects>
    None

  <Returns>
    None.
  """
  global _resources_allowed_dict
  global _resources_consumed_dict

  # Load the allowed resource limits from disk and publish them for the
  # rest of the nanny to consult.
  loaded_limits = resourcemanipulation.read_resourcedict_from_file(resourcefilename)
  _resources_allowed_dict = loaded_limits

  # Build the bookkeeping structure (with the correct locks, etc.) that
  # tracks how much of each resource has been consumed.
  _resources_consumed_dict = _create_resource_consumption_dict()
def joinvessels(vesselname1, vesselname2):
    """
    <Purpose>
      Joins two stopped vessels that share an owner into a single new
      vessel whose resources are the sum of both plus the offcut.

    <Arguments>
      vesselname1, vesselname2: the names of the vessels to join.

    <Exceptions>
      BadRequest if either vessel does not exist, the owners differ, or
      either vessel is currently 'Started'.

    <Side Effects>
      Destroys both original vessels, creates the combined vessel, and
      commits the updated vesseldict to disk.

    <Returns>
      The new vessel's name, a newline, and "Success".
    """
    # Call-form raises replace the Python-2-only comma form, matching the
    # BadRequest(...) raises already used below.
    if vesselname1 not in vesseldict:
        raise BadRequest("No such vessel '" + vesselname1 + "'")
    if vesselname2 not in vesseldict:
        raise BadRequest("No such vessel '" + vesselname2 + "'")

    if vesseldict[vesselname1]['ownerkey'] != vesseldict[vesselname2][
            'ownerkey']:
        raise BadRequest("Vessels must have the same owner")

    if vesseldict[vesselname1]['status'] == 'Started' or vesseldict[
            vesselname2]['status'] == 'Started':
        raise BadRequest("Attempting to join a running vessel")

    # get the new name
    newname = get_new_vessel_name()

    currentresourcedict1, call_list_v1 = resourcemanipulation.read_resourcedict_from_file(
        vesseldict[vesselname1]['resourcefilename'])
    currentresourcedict2, call_list_v2 = resourcemanipulation.read_resourcedict_from_file(
        vesseldict[vesselname2]['resourcefilename'])
    offcutresourcedict, offcut_call_list = resourcemanipulation.read_resourcedict_from_file(
        offcutfilename)

    # the final resources are reconstructed from the offcut + vessel1 + vessel2
    intermediateresourcedict = resourcemanipulation.add_resourcedicts(
        currentresourcedict1, currentresourcedict2)
    finalresourcedict = resourcemanipulation.add_resourcedicts(
        intermediateresourcedict, offcutresourcedict)

    # MMM: We add in the call list of resources from one of the original
    # resource file. Since this is for backward compatibility for Repy V1,
    # it does not matter which call list we add in to the final resource file.
    _setup_vessel(newname, vesselname1, finalresourcedict, call_list_v1)
    _destroy_vessel(vesselname1)
    _destroy_vessel(vesselname2)

    # Persist the updated vessel table so the change survives a restart.
    persist.commit_object(vesseldict, "vesseldict")
    return newname + "\nSuccess"
def addfiletovessel(vesselname,filename, filedata): if vesselname not in vesseldict: raise BadRequest, "No such vessel" # get the current amount of data used by the vessel... currentsize = nonportable.compute_disk_use(vesselname+"/") # ...and the allowed amount resourcedict = resourcemanipulation.read_resourcedict_from_file(vesseldict[vesselname]['resourcefilename']) # If the current size + the size of the new data is too large, then deny if currentsize + len(filedata) > resourcedict['diskused']: raise BadRequest("Not enough free disk space") try: _assert_is_allowed_filename(filename) except TypeError, e: raise BadRequest(str(e))
def addfiletovessel(vesselname,filename, filedata): if vesselname not in vesseldict: raise BadRequest, "No such vessel" # The user is restricted from using filename starting with 'private_' since # this part of the namespace is reserved for custom security layers. if filename.startswith("private_"): raise BadRequest("User is not allowed to use file names starting with 'private_'") # get the current amount of data used by the vessel... currentsize = nonportable.compute_disk_use(vesselname+"/") # ...and the allowed amount resourcedict, call_list = resourcemanipulation.read_resourcedict_from_file(vesseldict[vesselname]['resourcefilename']) # If the current size + the size of the new data is too large, then deny if currentsize + len(filedata) > resourcedict['diskused']: raise BadRequest("Not enough free disk space") try: check_repy_filename(filename) except TypeError, e: raise BadRequest(str(e))
def addfiletovessel(vesselname, filename, filedata): if vesselname not in vesseldict: raise BadRequest, "No such vessel" # The user is restricted from using filename starting with 'private_' since # this part of the namespace is reserved for custom security layers. if filename.startswith("private_"): raise BadRequest( "User is not allowed to use file names starting with 'private_'") # get the current amount of data used by the vessel... currentsize = nonportable.compute_disk_use(vesselname + "/") # ...and the allowed amount resourcedict, call_list = resourcemanipulation.read_resourcedict_from_file( vesseldict[vesselname]['resourcefilename']) # If the current size + the size of the new data is too large, then deny if currentsize + len(filedata) > resourcedict['diskused']: raise BadRequest("Not enough free disk space") try: check_repy_filename(filename) except RepyArgumentError, e: raise BadRequest(str(e))
raise BadRequest, "No such vessel" if vesseldict[vesselname]['status'] == 'Started': raise BadRequest("Attempting to split a running vessel") # get the new name newname1 = get_new_vessel_name() newname2 = get_new_vessel_name() try: proposedresourcedict, call_list = resourcemanipulation.parse_resourcedict_from_string(resourcedata) except resourcemanipulation.ResourceParseError, e: raise BadRequest(str(e)) # we must have enough so that starting - offcut - proposed > 0 startingresourcedict, call_list_vessel = resourcemanipulation.read_resourcedict_from_file(vesseldict[vesselname]['resourcefilename']) offcutresourcedict, offcut_call_list = resourcemanipulation.read_resourcedict_from_file(offcutfilename) # let's see what happens if we just remove the offcut... try: intermediateresourcedict = resourcemanipulation.subtract_resourcedicts(startingresourcedict,offcutresourcedict) except resourcemanipulation.ResourceMathError, e: raise BadRequest('Existing resources are so small they cannot be split.\n'+str(e)) # now let's remove the proposed bits! try: finalresourcedict = resourcemanipulation.subtract_resourcedicts(intermediateresourcedict,proposedresourcedict) except resourcemanipulation.ResourceMathError, e: raise BadRequest('Proposed vessel is too large.\n'+str(e))
def handle_threading_error(): """ <Purpose> Handles a repy node failing with ThreadErr. If repy is allowed to use more than 10% of the current threads, reduce the global thread count by 50% and stop all existing vessels <Arguments> None <Exceptions> None <Side Effects> May re-write all resource files and stop all vessels <Returns> None """ # Make a log of this servicelogger.log("[ERROR]:A Repy vessel has exited with ThreadErr status. Checking to determine next step") # Get all the names of the vessels vesselnamelist = nmAPI.vesseldict.keys() # read in all of the resource files so that we can look at and possibly # manipulate them. resourcedicts = {} for vesselname in vesselnamelist: resourcedicts[vesselname] = resourcemanipulation.read_resourcedict_from_file('resource.'+vesselname) # Get the number of threads Repy has allocated allowedthreadcount = 0 for vesselname in vesselnamelist: allowedthreadcount = allowedthreadcount + resourcedicts[vesselname]['events'] # Get the total number os system threads currently used totalusedthreads = nonportable.os_api.get_system_thread_count() # Log this information servicelogger.log("[WARNING]:System Threads: "+str(totalusedthreads)+" Repy Allocated Threads: "+str(allowedthreadcount)) # Get the NM configuration configuration = persist.restore_object("nodeman.cfg") # Check if there is a threshold configuration, # otherwise add the default configuration if NOOP_CONFIG_KEY in configuration: threshold = configuration[NOOP_CONFIG_KEY] else: threshold = DEFAULT_NOOP_THRESHOLD configuration[NOOP_CONFIG_KEY] = threshold persist.commit_object(configuration, "nodeman.cfg") # Check if we are below the threshold, if so # then just return, this is a noop if allowedthreadcount < totalusedthreads * threshold: return servicelogger.log("[ERROR]:Reducing number of system threads!") #### We are above the threshold! 
Let's cut everything by 1/2 # First, update the resource files for vesselname in vesselnamelist: # cut the events by 1/2 resourcedicts[vesselname]['events'] = resourcedicts[vesselname]['events'] / 2 # write out the new resource files... resourcemanipulation.write_resourcedict_to_file(resourcedicts[vesselname], 'resource.'+vesselname) # Create the stop tuple, exit code 57 with an error message stoptuple = (57, "Fatal system-wide threading error! Stopping all vessels.") # Stop each vessel for vesselname in vesselnamelist: try: # Stop each vessel, using our stoptuple nmAPI.stopvessel(vesselname,stoptuple) except Exception, exp: # Forge on, regardless of errors servicelogger.log("[ERROR]:Failed to reset vessel (Handling ThreadErr). Exception: "+str(exp)) servicelogger.log_last_exception()
def handle_threading_error(): """ <Purpose> Handles a repy node failing with ThreadErr. If repy is allowed to use more than 10% of the current threads, reduce the global thread count by 50% and stop all existing vessels <Arguments> None <Exceptions> None <Side Effects> May re-write all resource files and stop all vessels <Returns> None """ # Make a log of this servicelogger.log( "[ERROR]:A Repy vessel has exited with ThreadErr status. Checking to determine next step" ) # Get all the names of the vessels vesselnamelist = nmAPI.vesseldict.keys() # read in all of the resource files so that we can look at and possibly # manipulate them. resourcedicts = {} for vesselname in vesselnamelist: resourcedicts[ vesselname] = resourcemanipulation.read_resourcedict_from_file( 'resource.' + vesselname) # Get the number of threads Repy has allocated allowedthreadcount = 0 for vesselname in vesselnamelist: allowedthreadcount = allowedthreadcount + resourcedicts[vesselname][ 'events'] # Get the total number os system threads currently used totalusedthreads = nonportable.os_api.get_system_thread_count() # Log this information servicelogger.log("[WARNING]:System Threads: " + str(totalusedthreads) + " Repy Allocated Threads: " + str(allowedthreadcount)) # Get the NM configuration configuration = persist.restore_object("nodeman.cfg") # Check if there is a threshold configuration, # otherwise add the default configuration if NOOP_CONFIG_KEY in configuration: threshold = configuration[NOOP_CONFIG_KEY] else: threshold = DEFAULT_NOOP_THRESHOLD configuration[NOOP_CONFIG_KEY] = threshold persist.commit_object(configuration, "nodeman.cfg") # Check if we are below the threshold, if so # then just return, this is a noop if allowedthreadcount < totalusedthreads * threshold: return servicelogger.log("[ERROR]:Reducing number of system threads!") #### We are above the threshold! 
Let's cut everything by 1/2 # First, update the resource files for vesselname in vesselnamelist: # cut the events by 1/2 resourcedicts[vesselname][ 'events'] = resourcedicts[vesselname]['events'] / 2 # write out the new resource files... resourcemanipulation.write_resourcedict_to_file( resourcedicts[vesselname], 'resource.' + vesselname) # Create the stop tuple, exit code 57 with an error message stoptuple = (57, "Fatal system-wide threading error! Stopping all vessels.") # Stop each vessel for vesselname in vesselnamelist: try: # Stop each vessel, using our stoptuple nmAPI.stopvessel(vesselname, stoptuple) except Exception, exp: # Forge on, regardless of errors servicelogger.log( "[ERROR]:Failed to reset vessel (Handling ThreadErr). Exception: " + str(exp)) servicelogger.log_last_exception()
if vesseldict[vesselname]['status'] == 'Started': raise BadRequest("Attempting to split a running vessel") # get the new name newname1 = get_new_vessel_name() newname2 = get_new_vessel_name() try: proposedresourcedict, call_list = resourcemanipulation.parse_resourcedict_from_string( resourcedata) except resourcemanipulation.ResourceParseError, e: raise BadRequest(str(e)) # we must have enough so that starting - offcut - proposed > 0 startingresourcedict, call_list_vessel = resourcemanipulation.read_resourcedict_from_file( vesseldict[vesselname]['resourcefilename']) offcutresourcedict, offcut_call_list = resourcemanipulation.read_resourcedict_from_file( offcutfilename) # let's see what happens if we just remove the offcut... try: intermediateresourcedict = resourcemanipulation.subtract_resourcedicts( startingresourcedict, offcutresourcedict) except resourcemanipulation.ResourceMathError, e: raise BadRequest( 'Existing resources are so small they cannot be split.\n' + str(e)) # now let's remove the proposed bits! try: