def get_version(self):
    if self.probed:
        return self._version
    # dummy to meet Sfi's expectations for its 'options' field
    class DummyOptions:
        pass
    options = DummyOptions()
    options.verbose = False
    options.timeout = 10
    try:
        client = Sfi(options)
        client.read_config()
        key_file = client.get_key_file()
        cert_file = client.get_cert_file(key_file)
        url = self.url()
        logger.info('issuing get version at %s' % url)
        logger.debug("GetVersion, using timeout=%d" % options.timeout)
        server = xmlrpcprotocol.get_server(url, key_file, cert_file,
                                           timeout=options.timeout, verbose=options.verbose)
        self._version = server.GetVersion()
    except:
        self._version = {}
    self.probed = True
    return self._version
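# The DummyOptions idiom above is just an attribute bag that satisfies Sfi's
# expectation of an options object. A minimal sketch of the same pattern using
# the standard library; argparse.Namespace is only a suggested equivalent, not
# what the original code uses.
from argparse import Namespace

options = Namespace(verbose=False, timeout=10)
print(options.timeout)   # -> 10, the same attribute access Sfi relies on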
def get_version(self):
    ### if we already know the answer:
    if self.probed:
        return self._version
    ### otherwise let's look in the cache file
    logger.debug("searching in version cache %s" % self.url())
    cached_version = VersionCache().get(self.url())
    if cached_version is not None:
        logger.info("Retrieved version info from cache %s" % self.url())
        return cached_version
    ### otherwise let's do the hard work
    # dummy to meet Sfi's expectations for its 'options' field
    class DummyOptions:
        pass
    options = DummyOptions()
    options.verbose = self.verbose
    options.timeout = 10
    try:
        client = Sfi(options)
        client.read_config()
        client.bootstrap()
        key_file = client.private_key
        cert_file = client.my_gid
        logger.debug("using key %s & cert %s" % (key_file, cert_file))
        url = self.url()
        logger.info('issuing GetVersion at %s' % url)
        # setting timeout here seems to get the call to fail - even though the response time is fast
        # server = SfaServerProxy(url, key_file, cert_file, verbose=self.verbose, timeout=options.timeout)
        server = SfaServerProxy(url, key_file, cert_file, verbose=self.verbose)
        self._version = ReturnValue.get_value(server.GetVersion())
    except:
        logger.log_exc("failed to get version")
        self._version = {}
    # so that next run from this process will find out
    self.probed = True
    # store in version cache so next processes will remember for an hour
    cache = VersionCache()
    cache.set(self.url(), self._version)
    cache.save()
    logger.debug("Saved version for url=%s in version cache" % self.url())
    # that's our result
    return self._version
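# Hedged usage sketch of the caching behaviour above (not part of the original
# module): the first get_version() call performs the GetVersion RPC and stores
# the result through VersionCache; the second returns either the in-memory
# _version or the cached entry. The URL below is a placeholder, not a real
# endpoint, and Interface is assumed importable from the surrounding module.
iface = Interface("https://registry.example.org:12345/", mentioned_in="example")

v1 = iface.get_version()   # slow path: RPC, result written to the version cache
v2 = iface.get_version()   # fast path: self.probed is set, stored _version returned
assert v1 == v2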
def main(self):
    usage = "%prog [options] url-entry-point(s)"
    parser = OptionParser(usage=usage)
    parser.add_option("-d", "--dir", dest="sfi_dir",
                      help="config & working directory - default is " + Sfi.default_sfi_dir(),
                      metavar="PATH", default=Sfi.default_sfi_dir())
    parser.add_option("-o", "--output", action='append', dest='outfiles', default=[],
                      help="output filenames (cumulative) - defaults are %r" % SfaScan.default_outfiles)
    parser.add_option("-l", "--left-to-right", action="store_true", dest="left_to_right", default=False,
                      help="instead of top-to-bottom")
    parser.add_option("-v", "--verbose", action="count", dest="verbose", default=0,
                      help="verbose - can be repeated for more verbosity")
    parser.add_option("-c", "--clean-cache", action='store_true', dest='clean_cache', default=False,
                      help='clean/trash version cache and exit')
    parser.add_option("-s", "--show-cache", action='store_true', dest='show_cache', default=False,
                      help='show/display version cache')
    (options, args) = parser.parse_args()
    logger.enable_console()
    # apply current verbosity to logger
    logger.setLevelFromOptVerbose(options.verbose)
    # figure if we need to be verbose for these local classes that only have a bool flag
    bool_verbose = logger.getBoolVerboseFromOpt(options.verbose)
    if options.show_cache:
        VersionCache().show()
        sys.exit(0)
    if options.clean_cache:
        VersionCache().clean()
        sys.exit(0)
    if not args:
        parser.print_help()
        sys.exit(1)
    if not options.outfiles:
        options.outfiles = SfaScan.default_outfiles
    scanner = Scanner(left_to_right=options.left_to_right, verbose=bool_verbose)
    entries = [Interface(entry, mentioned_in="command line") for entry in args]
    try:
        g = scanner.graph(entries)
        logger.info("creating layout")
        g.layout(prog='dot')
        for outfile in options.outfiles:
            logger.info("drawing in %s" % outfile)
            g.draw(outfile)
        logger.info("done")
    # test mode when pygraphviz is not available
    except:
        entry = entries[0]
        print "GetVersion at %s returned %s" % (entry.url(), entry.get_version())
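# The repeated -v handling in main() relies on optparse's action="count".
# Self-contained illustration of just that mechanism (standard library only,
# independent of the SFA classes above).
from optparse import OptionParser

parser = OptionParser()
parser.add_option("-v", "--verbose", action="count", dest="verbose", default=0,
                  help="verbose - can be repeated for more verbosity")
(options, args) = parser.parse_args(["-v", "-v", "-v"])
print(options.verbose)   # -> 3; logger.setLevelFromOptVerbose() maps this count to a log level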
#! /usr/bin/env python

# sfi -- slice-based facility interface

import sys

from sfa.client.sfi import Sfi

if __name__ == "__main__":
    sys.exit(Sfi().main())
class SFAVNode(AbstractResourceAdapter):
    '''
    This class represents a resource adapter for interoperating with other SFA testbeds
    '''

    class DummyOptions:
        pass

    def __init__(self, manager, *args, **kw):
        '''
        Constructor
        '''
        super(SFAVNode, self).__init__(*args, **kw)
        self.__instances = set()
        manager.register_adapter("/sfavnode*", self)
        manager.register_adapter("/SFAPNode*/sfavnode", self)
        #version_dict = {'type':'SFA', 'version':'1'}
        #version_dict = {'type':'SFA', 'version':'1','schema':None,'namespace':None,'extensions':[]}
        self.__manager = manager
        self.__sfi = Sfi()
        self.__sfi.read_config()
        self.__sfi.bootstrap()
        self.__credentials = [ self.__sfi.my_credential_string ]
        self.__bootstrap = self.__sfi.bootstrap.server_proxy(self.__sfi.sm_url)
        # self.__options = {}
        # self.__options[ 'geni_rspec_version' ] = version_dict
        self.__config = {}
        global logger
        logger = logging.getLogger("SFAVNode")

    #
    # Management of the servers
    #
    def registry(self):
        #def registry (self, sfi):
        # cache the result
        if not hasattr(self, 'registry_proxy'):
            self.logger.info("Contacting Registry at: %s" % self.__sfi.reg_url)
            #self.logger.info("Contacting Registry at: %s"%sfi.reg_url)
            self.registry_proxy = SfaServerProxy(self.__sfi.reg_url, self.__sfi.private_key, self.__sfi.my_gid)
            #self.registry_proxy = SfaServerProxy(sfi.reg_url, sfi.private_key, sfi.my_gid)
        return self.registry_proxy

    def list_resources(self, parent, typename):
        assert(typename == "SFAVNode" or not typename)
        logger.debug('List of running instances %s' % self.__instances)
        return [ Identifier("/SFAVNode-" + i) for i in self.__instances ]

    def add_resource(self, parent_id, name, typename, config, owner=None):
        if not parent_id:
            raise ValueError("Need a parent")
        # Creating a new SFI client object
        # sfi = Sfi()
        # sfi.read_config()
        # sfi.bootstrap()
        # credentials = [ sfi.my_credential_string ]
        # bootstrap = sfi.bootstrap.server_proxy(sfi.sm_url)
        # Create new options
        version_dict = {'type':'SFA', 'version':'1','schema':None,'namespace':None,'extensions':[]}
        options = {}
        options['geni_rspec_version'] = version_dict
        # Getting the configuration parameter of the parent resource, in this case an RSpec
        parent = self.__manager.get_resource(parent_id)
        pconfig = parent.get_configuration()
        rspec = pconfig['xmlRspec']
        logger.debug('RSpec of the parent %s' % rspec)
        # Saving the hostname of the parent in order to retrieve the slice later
        hostname = self.fetch_tag(rspec, 'hostname')
        logger.debug('Saved the hostname %s' % hostname)
        # Getting the vctname and creating the slice_hrn
        slice_hrn = 'raven.fts.%s' % config['vctname']
        slice_urn = hrn_to_urn(slice_hrn, 'slice')
        logger.info('Creating or updating a slice with the name %s' % slice_hrn)
        # Preparing the server_proxy object and getting the server version
        result = self.__bootstrap.GetVersion()
        server_version = ReturnValue.get_value(result)
        logger.debug('Received server version %s' % server_version)
        # Creating the slice record dict or string
        #recorddict = dict({'hrn': slice_hrn,
        #                   'url': 'http://planet-lab.org',
        #                   'type': 'slice',
        #                   'researcher': ['teagle.teagle.teagle'],
        #                   'description': 'Teagle slice'})
        slice_str = '<record description="Teagle Slice4" hrn="%s" type="slice" url="http://planet-lab.org"><researcher>teagle.teagle.teagle</researcher></record>' % slice_hrn
        slicerecord = SfaRecord(string=slice_str).as_dict()
        logger.debug('Prepared a slice record to add to the registry %s' % slice_str)
        # Retrieving the credential of the authority
        auth_cred = self.__sfi.bootstrap.authority_credential_string(self.__sfi.authority)
        #auth_cred = sfi.bootstrap.authority_credential_string(sfi.authority)
        logger.debug('Authority %s credentials %s' % (self.__sfi.authority, auth_cred,))
        #logger.debug('Authority %s credentials %s'%(sfi.authority, auth_cred,))
        # Trying to create the slice
        try:
            records = self.registry().Register(slicerecord, auth_cred)
            #records = self.registry(sfi).Register(slicerecord, auth_cred)
        except ServerException:
            logger.debug("Slice already existing")
            pass
        # Saving the slice credential
        creds = [self.__sfi.slice_credential_string(slice_hrn)]
        #creds = [sfi.slice_credential_string(slice_hrn)]
        logger.debug('The slice credential: %s' % creds)
        # users
        # need to pass along user keys to the aggregate.
        # users = [
        #  { urn: urn:publicid:IDN+emulab.net+user+alice
        #    keys: [<ssh key A>, <ssh key B>]
        #  }]
        users = []
        slice_records = self.registry().Resolve(slice_urn, [self.__sfi.my_credential_string])
        #slice_records = self.registry(sfi).Resolve(slice_urn, [sfi.my_credential_string])
        if slice_records and 'researcher' in slice_records[0] and slice_records[0]['researcher'] != []:
            slice_record = slice_records[0]
            user_hrns = slice_record['researcher']
            user_urns = [hrn_to_urn(hrn, 'user') for hrn in user_hrns]
            user_records = self.registry().Resolve(user_urns, [self.__sfi.my_credential_string])
            #user_records = self.registry(sfi).Resolve(user_urns, [sfi.my_credential_string])
            if 'sfa' not in server_version:
                users = pg_users_arg(user_records)
                rspec = RSpec(rspec)
                rspec.filter({'component_manager_id': server_version['urn']})
                rspec = RSpecConverter.to_pg_rspec(rspec.toxml(), content_type='request')
            else:
                users = sfa_users_arg(user_records, slice_record)
        logger.debug('Creating the sliver using the RSpec %s' % rspec)
        time.sleep(5)
        # Creating the sliver
        logger.debug("###################slice_urn: %s" % (slice_urn,))
        logger.debug("###################creds: %s" % (creds,))
        logger.debug("###################rspec: %s" % (rspec,))
        logger.debug("###################users: %s" % (users,))
        logger.debug("###################options: %s" % (options,))
        result = self.__bootstrap.CreateSliver(slice_urn, creds, rspec, users, options)
        #result = bootstrap.CreateSliver(slice_urn, creds, rspec, users, options)
        value = ReturnValue.get_value(result)
        logger.debug("###################return value: %s" % (value,))
        options['geni_slice_urn'] = hrn_to_urn(slice_hrn, 'slice')
        options['call_id'] = unique_call_id()
        slice_credentials = self.__sfi.slice_credential_string(slice_hrn)
        #slice_credentials = sfi.slice_credential_string(slice_hrn)
        list_resources = self.__bootstrap.ListResources(slice_credentials, options)
        #list_resources = bootstrap.ListResources(slice_credentials, options)
        #self.__sfi.bootstrap.server_proxy(self.__sfi.sm_url)
        logger.debug("ListResources of slice %s returned : %s" % (slice_hrn, list_resources['value']))
        slice_rspec = RSpec(list_resources['value'])
        nodes = slice_rspec.version.get_nodes()
        for node in nodes:
            component_name = ''
            component_name = node['component_name']
            tags = self.convert_tags(node['tags'])
            node_hostname = tags['hostname']
            if hostname in node_hostname:
                # store the information of the specific sliver in the config
                ### XXX change the username with the real sliver name
                sliver_name = self.fetch_username(slice_rspec)
                ip = self.fetch_ip(slice_rspec)
                name = '%s.%s' % (hostname, sliver_name,)
                conf = {'public_ip': ip, 'username': sliver_name, 'password': None}
                #self.__config['/SFAVNode-%s' %name] = conf
                self.__config[name] = conf
                logger.info('Adding the resource instance %s ' % name)
        time.sleep(10)
        if not self.poll_machine(ip, sliver_name):
            raise Exception('Connection with the sliver not possible')
        if name in self.__instances:
            raise DuplicateNameError(parent_id, typename, name)
        self.__instances.add(name)
        return name

    def have_resource(self, identifier):
        assert(identifier.typename == "sfavnode")
        logger.debug('identifier %s' % identifier)
        return identifier.name in self.__instances

    def get_resource(self, identifier):
        return identifier

    def get_configuration(self, identifier):
        logger.debug("identifier %s and resource name %s" % (identifier, identifier.resourcename,))
        assert(identifier.typename == "sfavnode")
        if not self.have_resource(identifier):
            raise InstanceNotFound(identifier)
        return self.__config[identifier.resourcename]

    def set_configuration(self, identifier, config):
        assert(identifier.parent == None)
        assert(identifier.typename == "SFAVNode")
        return

    def get_attribute(self, identifier, name):
        assert(identifier.parent == None)
        assert(identifier.typename == "SFAVNode")
        raise ConfigurationAttributeError(name)

    def set_attribute(self, identifier, name, value):
        assert(identifier.parent == None)

    #************************
    def delete_resource(self, identifier, owner, force=False):
        assert(identifier.parent == None)
        assert(identifier.typename == "SFAVNode")
        # TODO create the method for removing this sliver instantiated
        # Create new options
        version_dict = {'type':'SFA', 'version':'1','schema':None,'namespace':None,'extensions':[]}
        options = {}
        options['geni_rspec_version'] = version_dict
        # set.pop() takes no argument, so discard the named instance instead
        self.__instances.discard(identifier.resourcename)

    def get_cached_server_version(self, server, options):
        # check local cache first
        cache = None
        version = None
        cache_file = os.path.join(options.sfi_dir, 'sfi_cache.dat')
        cache_key = server.url + "-version"
        try:
            cache = Cache(cache_file)
        except IOError:
            cache = Cache()
            self.logger.info("Local cache not found at: %s" % cache_file)
        if cache:
            version = cache.get(cache_key)
        if not version:
            result = server.GetVersion()
            version = ReturnValue.get_value(result)
            # cache version for 20 minutes
            cache.add(cache_key, version, ttl=60*20)
            self.logger.info("Updating cache file %s" % cache_file)
            cache.save_to_file(cache_file)
        return version

    @staticmethod
    def default_sfi_dir():
        if os.path.isfile("./sfi_config"):
            return os.getcwd()
        else:
            return os.path.expanduser("~/.sfi/")

    def poll_machine(self, ip, uname, passwd=None):
        sshclient = paramiko.SSHClient()
        sshclient.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        counter = 0
        connection = False
        while(counter < 100):
            counter = counter + 1
            try:
                sshclient.connect(ip, username=uname, password=None)
                connection = True
                # logger.debug("connection possible")
                break
            except:
                # logger.debug("no connection possible")
                time.sleep(10)
        sshclient.close()
        return connection

    def convert_tags(self, tags):
        tags_dict = {}
        for tag in tags:
            tags_dict[tag['tagname']] = tag['value']
        return tags_dict

    def fetch_tag(self, rspec_string, tag):
        hostnames = []
        rspec = RSpec(rspec_string)
        nodes = rspec.version.get_nodes()
        for node in nodes:
            tags = self.convert_tags(node['tags'])
            hostnames.append(tags[tag])
        #if len(hostnames) > 0:#????
        return hostnames[0]

    def fetch_ip(self, rspec):
        ips = []
        nodes = rspec.version.get_nodes()
        for node in nodes:
            interfaces = node['interfaces']
            for interface in interfaces:
                ips.append(interface['ipv4'])
        #if len(hostnames) > 0:#????
        return ips[0]

    def fetch_username(self, rspec):
        username = ''
        filter = {}
        xpath = '//node%s | //default:node%s' % (XpathFilter.xpath(filter), XpathFilter.xpath(filter))
        node_elems = rspec.xml.xpath(xpath)
        for node_elem in node_elems:
            for services_elem in node_elem.xpath('./default:services | ./services'):
                for login_elem in services_elem.xpath('./default:login | ./login'):
                    username = login_elem.attrib['username']
        return username

    def execute_method(self, identifier, name, *args, **kw):
        assert(identifier.parent == None)
        assert(identifier.typename == "SFAVNode")
        try:
            fun = getattr(self, 'execute_%s' % (name,))
        except AttributeError:
            raise NoSuchMethodError(identifier, name)
        return fun(identifier, **kw)

    def execute_update(self, identifier, **kw):
        logger.debug("update method")
        return

    def execute_start(self, identifier, **kw):
        logger.debug("start method")
        return

    def execute_stop(self, identifier, **kw):
        logger.debug("stop method")
        return
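# Small sketch of the tag handling that convert_tags()/fetch_tag() above perform
# on the node records coming out of RSpec.version.get_nodes(). The tag list shape
# ({'tagname': ..., 'value': ...}) is taken from the code; the values themselves
# are hypothetical.
tags = [{'tagname': 'hostname', 'value': 'plab1.example.net'},
        {'tagname': 'arch',     'value': 'x86_64'}]

tags_dict = {}
for tag in tags:
    tags_dict[tag['tagname']] = tag['value']

assert tags_dict['hostname'] == 'plab1.example.net'   # what fetch_tag(rspec, 'hostname') would return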
class SFAPNode(AbstractResourceAdapter):
    '''
    This class represents a resource adapter for interoperating with other SFA testbeds
    '''

    def __init__(self, manager, *args, **kw):
        '''
        Constructor
        '''
        super(SFAPNode, self).__init__(*args, **kw)
        self.__instances = set()
        global logger
        logger = logging.getLogger("SFAPNode")
        manager.register_adapter("/SFAPNode", self)
        self.__manager = manager
        version_dict = {'type':'SFA', 'version':'1'}
        self.__rspecs = {}
        self.__sfi = Sfi()
        self.__sfi.read_config()
        self.__sfi.bootstrap()
        self.__credentials = [ self.__sfi.my_credential_string ]
        self.__options = {}
        self.__options['geni_rspec_version'] = version_dict
        # fill self.__instances
        # temp = list_resources(None, "SFAPNode")

    def list_resources(self, parent, typename):
        assert(typename == "SFAPNode" or not typename)
        assert(parent == None)
        list_resources = self.__sfi.bootstrap.server_proxy(self.__sfi.sm_url).ListResources(self.__credentials, self.__options)
        self.__sfi.bootstrap.server_proxy(self.__sfi.sm_url)
        logger.debug("ListResources at %s returned : %s" % (self.__sfi.sm_url, list_resources['value']))
        rspec = RSpec(list_resources['value'])
        nodes = rspec.version.get_nodes()
        networks = rspec.version.get_networks()
        for node in nodes:
            version = {'namespace': None, 'version': '1', 'type': 'SFA', 'extensions': [], 'schema': None}
            rspec_tmp = RSpec(version=version)
            rspec_tmp.version.add_network(networks[0]['name'])
            rspec_tmp.version.add_nodes([node])
            component_name = ''
            component_name = node['component_name']
            rspec_tmp.version.add_slivers(hostnames=[component_name])
            rspec_tmp_xml = RSpecConverter.to_sfa_rspec(rspec_tmp.toxml())
            self.__instances.add(component_name)
            rspec_xml = {}
            rspec_xml['xmlRspec'] = rspec_tmp_xml
            self.__rspecs['/SFAPNode-%s' % component_name] = rspec_xml
            logger.debug("PNodes %s " % component_name)
        logger.debug(self.__rspecs)
        return [ Identifier("/SFAPNode-%s" % i) for i in self.__instances ]

    def add_resource(self, parent_id, name, typename, config, owner=None):
        # random.choice needs a sequence, so materialize the set first
        return random.choice(list(self.__instances))

    def have_resource(self, identifier):
        assert(identifier.parent == None)
        assert(identifier.typename == "SFAPNode")
        return identifier.name in self.__instances

    def get_resource(self, identifier):
        return identifier

    def get_configuration(self, identifier):
        assert(identifier.parent == None)
        assert(identifier.typename == "SFAPNode")
        if not self.have_resource(identifier):
            raise InstanceNotFound(identifier)
        return self.__rspecs[identifier]

    def set_configuration(self, identifier, config):
        assert(identifier.parent == None)
        assert(identifier.typename == "SFAPNode")
        config
        return

    def get_attribute(self, identifier, name):
        assert(identifier.parent == None)
        assert(identifier.typename == "SFAPNode")
        raise ConfigurationAttributeError(name)

    def set_attribute(self, identifier, name, value):
        assert(identifier.parent == None)

    def delete_resource(self, identifier, owner, force=False):
        assert(identifier.parent == None)
        assert(identifier.typename == "SFAPNode")
        # set.pop() takes no argument, so discard the named instance instead
        self.__instances.discard(identifier.resourcename)

    def _get_rspec(self, identifier):
        # the attribute is the (name-mangled) __rspecs set up in __init__
        return self.__rspecs[identifier]

    def execute_method(self, identifier, name, *args, **kw):
        assert(identifier.parent == None)
        assert(identifier.typename == "SFAPNode")
        try:
            fun = getattr(self, 'execute_%s' % (name,))
        except AttributeError:
            raise NoSuchMethodError(identifier, name)
        return fun(identifier, **kw)

    def execute_update(self, identifier, **kw):
        logger.debug("update method")
        return

    def execute_start(self, identifier, **kw):
        logger.debug("start method")
        return

    def execute_stop(self, identifier, **kw):
        logger.debug("stop method")
        return
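# Hedged sketch of how the adapter above might be wired up. Only register_adapter(),
# get_resource() and the "/SFAPNode-<component_name>" identifier convention are taken
# from the code; the import path and the stub manager are hypothetical, and a working
# ~/.sfi configuration is assumed since __init__ bootstraps an Sfi client.
from sfa_adapters import SFAPNode          # hypothetical module name

class StubManager(object):
    def register_adapter(self, pattern, adapter):
        print("registered %s -> %s" % (pattern, adapter.__class__.__name__))
    def get_resource(self, identifier):
        raise NotImplementedError("not needed for plain listing")

adapter = SFAPNode(StubManager())          # registers itself under "/SFAPNode"
for ident in adapter.list_resources(None, "SFAPNode"):
    print(ident)                           # identifiers look like "/SFAPNode-<component_name>"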
class SFAAPI(object):
    """
    API for querying the SFA service. It uses the Sfi class from the sfi client tool.
    """

    def __init__(self, sfi_user, sfi_auth, sfi_registry, sfi_sm, private_key, ec,
                 batch, rtype, timeout):
        self._blacklist = set()
        self._reserved = set()
        self._resources_cache = None
        self._already_cached = False
        self._ec = ec
        self.apis = 1

        if batch:
            self._testbed_res = rtype
            self._count = 0
            self._total = self._get_total_res()
            self._slice_resources_batch = list()

        self._log = Logger("SFA API")
        self.api = Sfi()
        self.rspec_proc = SfaRSpecProcessing()
        self.lock_slice = threading.Lock()
        self.lock_blist = threading.Lock()
        self.lock_resv = threading.Lock()

        self.api.options.timeout = timeout
        self.api.options.raw = None
        self.api.options.user = sfi_user
        self.api.options.auth = sfi_auth
        self.api.options.registry = sfi_registry
        self.api.options.sm = sfi_sm
        self.api.options.user_private_key = private_key

        # Load blacklist from file
        if ec.get_global('PlanetlabNode', 'persist_blacklist'):
            self._set_blacklist()

    def _set_blacklist(self):
        """
        Initialize the blacklist with nodes blacklisted in previous runs.
        """
        nepi_home = os.path.join(os.path.expanduser("~"), ".nepi")
        plblacklist_file = os.path.join(nepi_home, "plblacklist.txt")
        with open(plblacklist_file, 'r') as f:
            hosts_tobl = f.read().splitlines()
            if hosts_tobl:
                for host in hosts_tobl:
                    self._blacklist.add(host)

    def _get_total_res(self):
        """
        Get the total amount of resources instantiated using this API, to be
        able to add them all in the same Allocate and Provision call. This is
        needed especially for the Wilabt testbed, which doesn't allow adding
        slivers after the slice already has some.
        """
        rms = list()
        res_gids = self._ec.resources
        for gid in res_gids:
            rm = self._ec.get_resource(gid)
            if self._testbed_res.lower() in rm._rtype.lower():
                rms.append(rm)
        return rms

    def _sfi_exec_method(self, command, slicename=None, rspec=None, urn=None, action=None):
        """
        Execute an sfi method, which corresponds to an SFA call. It can be one
        of the following calls: Describe, Delete, Allocate, Provision,
        ListResources.
        """
        if command in ['describe', 'delete', 'allocate', 'provision', 'action']:
            if not slicename:
                raise TypeError("The slice hrn is expected for this method %s" % command)
            if command == 'allocate' and not rspec:
                raise TypeError("RSpec is expected for this method %s" % command)

            if command == 'allocate':
                args_list = [slicename, rspec]
            else:
                args_list = [slicename]
            if command != 'delete':
                args_list = args_list + ['-o', '/tmp/rspec_output']
            if command == 'action':
                args_list = [slicename, action]
        elif command == 'resources':
            args_list = ['-o', '/tmp/rspec_output']
        else:
            raise TypeError("Sfi method not supported")

        self.api.command = command
        self.api.command_parser = self.api.create_parser_command(self.api.command)
        (command_options, command_args) = self.api.command_parser.parse_args(args_list)
        self.api.command_options = command_options
        self.api.read_config()
        self.api.bootstrap()

        try:
            os.remove("/tmp/rspec_output.rspec")
        except OSError:
            self._log.debug("Couldn't remove temporary output file for RSpec or it doesn't exist")

        try:
            self.api.dispatch(command, command_options, command_args)
            with open("/tmp/rspec_output.rspec", "r") as result_file:
                result = result_file.read()
            return result
        except:
            self._log.debug(" Couldn't retrieve rspec output information from method %s " % command)
            return None

    def get_resources_info(self):
        """
        Get all resources and their attributes from the aggregate.
        """
        try:
            rspec_slice = self._sfi_exec_method('resources')
        except:
            raise RuntimeError("Fail to list resources")

        self._resources_cache = self.rspec_proc.parse_sfa_rspec(rspec_slice)
        self._already_cached = True
        return self._resources_cache

    def get_resources_hrn(self, resources=None):
        """
        Get the list of resource hrns, without the resource info.
        """
        if not resources:
            if not self._already_cached:
                resources = self.get_resources_info()['resource']
            else:
                resources = self._resources_cache['resource']

        component_tohrn = dict()
        for resource in resources:
            hrn = resource['hrn'].replace('\\', '')
            component_tohrn[resource['component_name']] = hrn

        return component_tohrn

    def get_slice_resources(self, slicename):
        """
        Get resources and info from slice.
        """
        # so a failed describe doesn't leave rspec_slice undefined below
        rspec_slice = None
        try:
            with self.lock_slice:
                rspec_slice = self._sfi_exec_method('describe', slicename)
        except:
            self._log.debug("Fail to describe resources for slice %s, slice may be empty" % slicename)

        if rspec_slice is not None:
            result = self.rspec_proc.parse_sfa_rspec(rspec_slice)
            return result
        else:
            return {'resource': [], 'lease': []}

    def add_resource_to_slice(self, slicename, resource_hrn, leases=None):
        """
        Get the list of resources' urns, build the rspec string and call the
        allocate and provision methods.
        """
        resources_hrn_new = list()
        resource_parts = resource_hrn.split('.')
        resource_hrn = '.'.join(resource_parts[:2]) + '.' + '\\.'.join(resource_parts[2:])
        resources_hrn_new.append(resource_hrn)

        with self.lock_slice:
            rspec_slice = self._sfi_exec_method('describe', slicename)
            if rspec_slice is not None:
                slice_resources = self.rspec_proc.parse_sfa_rspec(rspec_slice)['resource']
            else:
                slice_resources = []
            if slice_resources:
                slice_resources_hrn = self.get_resources_hrn(slice_resources)
                for s_hrn_key, s_hrn_value in slice_resources_hrn.iteritems():
                    s_parts = s_hrn_value.split('.')
                    s_hrn = '.'.join(s_parts[:2]) + '.' + '\\.'.join(s_parts[2:])
                    resources_hrn_new.append(s_hrn)

            resources_urn = self._get_resources_urn(resources_hrn_new)
            rspec = self.rspec_proc.build_sfa_rspec(slicename, resources_urn, None, leases)
            f = open("/tmp/rspec_input.rspec", "w")
            f.truncate(0)
            f.write(rspec)
            f.close()

            if not os.path.getsize("/tmp/rspec_input.rspec") > 0:
                raise RuntimeError("Fail to create rspec file to allocate resource in slice %s" % slicename)

            # ALLOCATE
            try:
                self._log.debug("Allocating resources in slice %s" % slicename)
                out = self._sfi_exec_method('allocate', slicename, "/tmp/rspec_input.rspec")
            except:
                raise RuntimeError("Fail to allocate resource for slice %s" % slicename)

            if out is not None:
                # PROVISION
                try:
                    self._log.debug("Provisioning resources in slice %s" % slicename)
                    self._sfi_exec_method('provision', slicename)
                except:
                    raise RuntimeError("Fail to provision resource for slice %s" % slicename)
                return True

    def add_resource_to_slice_batch(self, slicename, resource_hrn, properties=None, leases=None):
        """
        Add all resources to the slice in one batch, after deleting any existing
        slivers. Used especially for Wilabt, which doesn't allow adding more
        resources to the slice once some have been added: every sliver has to be
        deleted and the whole batch added at once.
        """
        self._count += 1
        self._slice_resources_batch.append(resource_hrn)
        resources_hrn_new = list()
        if self._count == len(self._total):
            check_all_inslice = self._check_all_inslice(self._slice_resources_batch, slicename)
            if check_all_inslice == True:
                return True
            for resource_hrn in self._slice_resources_batch:
                resource_parts = resource_hrn.split('.')
                resource_hrn = '.'.join(resource_parts[:2]) + '.' + '\\.'.join(resource_parts[2:])
                resources_hrn_new.append(resource_hrn)
            with self.lock_slice:
                if check_all_inslice != 0:
                    self._sfi_exec_method('delete', slicename)
                    time.sleep(480)

                # Re-implementing urn from hrn because the library sfa-common doesn't work for wilabt
                resources_urn = self._get_urn(resources_hrn_new)
                rspec = self.rspec_proc.build_sfa_rspec(slicename, resources_urn, properties, leases)
                f = open("/tmp/rspec_input.rspec", "w")
                f.truncate(0)
                f.write(rspec)
                f.close()

                if not os.path.getsize("/tmp/rspec_input.rspec") > 0:
                    raise RuntimeError("Fail to create rspec file to allocate resources in slice %s" % slicename)

                # ALLOCATE
                try:
                    self._log.debug("Allocating resources in slice %s" % slicename)
                    out = self._sfi_exec_method('allocate', slicename, "/tmp/rspec_input.rspec")
                except:
                    raise RuntimeError("Fail to allocate resource for slice %s" % slicename)

                if out is not None:
                    # PROVISION
                    try:
                        self._log.debug("Provisioning resources in slice %s" % slicename)
                        self._sfi_exec_method('provision', slicename)
                        self._sfi_exec_method('action', slicename=slicename, action='geni_start')
                    except:
                        raise RuntimeError("Fail to provision resource for slice %s" % slicename)
                    return True
                else:
                    raise RuntimeError("Fail to allocate resources for slice %s" % slicename)
        else:
            self._log.debug(" Waiting for more nodes to add the batch to the slice ")

    def _check_all_inslice(self, resources_hrn, slicename):
        slice_res = self.get_slice_resources(slicename)['resource']
        if slice_res:
            if len(slice_res[0]['services']) != 0:
                slice_res_hrn = self.get_resources_hrn(slice_res).values()
                if self._compare_lists(slice_res_hrn, resources_hrn):
                    return True
                else:
                    return len(slice_res_hrn)
        return 0

    def _compare_lists(self, list1, list2):
        if len(list1) != len(list2):
            return False
        for item in list1:
            if item not in list2:
                return False
        return True

    def _get_urn(self, resources_hrn):
        """
        Get urn from hrn.
        """
        resources_urn = list()
        for hrn in resources_hrn:
            hrn = hrn.replace("\\", "").split('.')
            node = hrn.pop()
            auth = '.'.join(hrn)
            urn = ['urn:publicid:IDN+', auth, '+node+', node]
            urn = ''.join(urn)
            resources_urn.append(urn)
        return resources_urn

    def remove_resource_from_slice(self, slicename, resource_hrn, leases=None):
        """
        Remove slivers from the slice. Currently sfi doesn't support removing
        particular slivers.
        """
        resource_urn = self._get_resources_urn([resource_hrn]).pop()
        with self.lock_slice:
            try:
                self._sfi_exec_method('delete', slicename, urn=resource_urn)
            except:
                raise RuntimeError("Fail to delete resource for slice %s" % slicename)
        return True

    def remove_all_from_slice(self, slicename):
        """
        De-allocate and de-provision all slivers of the named slice.
        Currently sfi doesn't support removing particular slivers, so this
        method works only for removing every sliver. Setting the resource_hrn
        parameter is not necessary.
        """
        with self.lock_slice:
            try:
                self._sfi_exec_method('delete', slicename)
            except:
                raise RuntimeError("Fail to delete slivers for slice %s" % slicename)
        return True

    def _get_resources_urn(self, resources_hrn):
        """
        Build the list of resources' urns based on their hrns.
        """
        resources_urn = list()
        for resource in resources_hrn:
            resources_urn.append(hrn_to_urn(resource, 'node'))
        return resources_urn

    def blacklist_resource(self, resource_hrn):
        """
        Add resource_hrn to the blacklist and remove it from the reserved list.
        """
        with self.lock_blist:
            self._blacklist.add(resource_hrn)
        with self.lock_resv:
            if resource_hrn in self._reserved:
                self._reserved.remove(resource_hrn)

    def blacklisted(self, resource_hrn):
        """
        Check if the resource is in the blacklist.
        """
        with self.lock_blist:
            if resource_hrn in self._blacklist:
                return True
        return False

    def reserve_resource(self, resource_hrn):
        """
        Add the resource to the reserved list.
        """
        self._reserved.add(resource_hrn)

    def reserved(self, resource_hrn):
        """
        Check whether the resource is already reserved; if not, reserve it.
        """
        with self.lock_resv:
            if resource_hrn in self._reserved:
                return True
            else:
                self.reserve_resource(resource_hrn)
                return False

    def release(self):
        """
        Remove hosts from the reserved and blacklist sets; if the persist
        attribute is set, the blacklisted hosts are saved to the blacklist file.
        """
        self.apis -= 1
        if self.apis == 0:
            blacklist = self._blacklist
            self._blacklist = set()
            self._reserved = set()