def begin(args):
    """Upload recent USGS Landsat images into the local depot via libdlt.

    Builds a libdlt session against the local UNIS/IBP endpoints (derived
    from this host's FQDN), collects the names of exnodes already present
    in UNIS so they are not re-downloaded, then uploads each newly fetched
    TIFF and attaches its USGS metadata.

    :param args: parsed CLI namespace; reads ``args.ndays`` and
        ``args.coordinates``.
    """
    # Use the FQDN to determine the local depot and UNIS endpoints.
    # (The original also called socket.gethostname() into an unused local.)
    fqdn = socket.getfqdn()
    LOCAL_DEPOT = {"ibp://{}:6714".format(fqdn): {"enabled": True}}
    LOCAL_UNIS = "http://{}:{}".format(fqdn, LOCAL_UNIS_PORT)
    urls = [{"default": True, "url": LOCAL_UNIS}]
    opts = {"cache": {"preload": ["nodes", "services", "exnodes"]}}
    rt = Runtime(urls, **opts)
    sess = libdlt.Session(rt, bs="5m", depots=LOCAL_DEPOT, threads=1)

    # Names of exnodes already present in UNIS (skip list for the download).
    exnodes_present = [x.name for x in rt.exnodes]

    # Get the list of recent USGS Landsat images.
    # m_url2files_to_upload.keys() is a subset of m_url2metadata.keys(),
    # possibly a strict subset.
    m_url2metadata, m_url2files_to_upload = download_recent_USGS_TIFFS(
        args.ndays, exnodes_present, args.coordinates)

    for U in m_url2files_to_upload:
        metadata = m_url2metadata[U]
        # Each entry is a (name, path) pair; the dead pre-loop assignment
        # `fn_path = files_to_upload` from the original has been removed.
        for fn, fn_path in m_url2files_to_upload[U]:
            time_to_insert, exn = sess.upload(filepath=fn_path)
            add_metadata(exn, metadata, fn)
            log.info('Inserted exnode %s' % fn)
def init_runtime(remote, local, local_only):
    """Create a Runtime connected to the local (and optionally remote) UNIS.

    Retries forever on connection failures, sleeping 5 seconds between
    attempts.

    :param remote: URL of the remote UNIS instance.
    :param local: URL of the local UNIS instance.
    :param local_only: if True, connect only to *local* and register the
        exnode file callback.
    :return: a connected ``Runtime``.
    """
    while True:
        try:
            opts = {
                "cache": {
                    "preload": ["nodes", "services"]
                },
                "proxy": {
                    "defer_update": True
                }
            }
            if local_only:
                urls = [{"default": True, "url": local}]
                log.debug("Connecting to UNIS instance(s): {}".format(local))
            else:
                # Local first; the remote endpoint is the default for writes.
                urls = [{"url": local}, {"default": True, "url": remote}]
                log.debug(
                    "Connecting to UNIS instance(s): {}".format(remote + ',' + local))
            rt = Runtime(urls, **opts)
            if local_only:
                rt.exnodes.addCallback(file_cb)
            return rt
        except (ConnectionError, TimeoutError) as exp:
            # FIX: Logger.warn is deprecated (and the rest of this file uses
            # log.warning); switched to warning().
            log.warning(
                "Could not contact UNIS servers {}, retrying...".format(urls))
            time.sleep(5)
def orphaned_items(url):
    """Insert a small domain plus deliberately orphaned node/port objects."""
    with Runtime(url) as rt:
        member_node = node(rt, "in-orpaned-domain")
        member_port = port(rt, member_node)
        rt.insert(Domain({"name": "orphaned-domain",
                          "nodes": [member_node],
                          "ports": [member_port]}))
        # These two are created in the runtime but never attached to any
        # domain -- that is the point of this helper.
        node(rt, "orphaned-node")
        port(rt)
        commit(rt)
def do_get_unis_nodes(self, args):
    """Connect to the default UNIS instance and print its node list."""
    from unis.runtime import Runtime
    try:
        print(Runtime().nodes)
    except Exception as e:
        print("Error: %s" % e)
        import traceback
        traceback.print_exc()
def connect(self, hosts):
    """Block until a Runtime connection to the given UNIS hosts succeeds.

    Retries indefinitely, sleeping for the configured engine interval
    between attempts.
    """
    opts = {
        "cache": {"preload": ["nodes", "services"]},
        "proxy": {"subscribe": False, "defer_update": True},
    }
    # Hoist the endpoint list string -- it is used by both log calls.
    endpoints = ', '.join([v['url'] for v in hosts])
    log.debug(f"Connecting to UNIS instance(s): {endpoints}")
    while not self.rt:
        try:
            self.rt = Runtime(hosts, **opts)
        except (ConnectionError, TimeoutError, UnisReferenceError) as e:
            log.warning(f"Could not contact UNIS servers {endpoints}, retrying...")
            log.debug(f"-- {e}")
            time.sleep(self.cfg['engine']['interval'])
def osiris(url):
    """Build and commit the OSiRIS demo topology into UNIS."""
    domain_names = ["IU", "WSU", "MSU", "CHIC", "SALT", "SC16",
                    "IU-Crest", "UMich", "Cloudlab"]
    link_map = [("IU", "CHIC"), ("UMich", "CHIC"), ("WSU", "CHIC"),
                ("MSU", "CHIC"), ("CHIC", "SALT"), ("Cloudlab", "SALT"),
                ("SC16", "SALT"), ("SC16", "UMich"), ("SC16", "IU-Crest")]
    with Runtime(url) as rt:
        nodes = [node(rt, site) for site in domain_names]
        ports = [port(rt, site_node) for site_node in nodes]
        links = [link(rt, pair) for pair in link_map]
        domains = [domain(rt, site) for site in domain_names]
        topology(rt, "OSiRIS", nodes, ports, links, domains)
        commit(rt)
def populate(url):
    "Place test topology into UNIS"
    with Runtime(url) as rt:
        # Four nodes and four ports with predictable ids.
        node1, node2, node3, node4 = (
            Node({"id": "node%d" % i}) for i in range(1, 5))
        port1, port2, port3, port4 = (
            Port({"id": "port%d" % i}) for i in range(1, 5))
        # node1 owns port1; node2 owns the remaining three ports.
        node1.ports.append(port1)
        for p in (port2, port3, port4):
            node2.ports.append(p)
        # Undirected links forming the chain port1-port2-port3-port4.
        link1 = Link({"id": "link1-2", "directed": False,
                      "endpoints": [port1, port2]})
        link2 = Link({"id": "link2-3", "directed": False,
                      "endpoints": [port2, port3]})
        link3 = Link({"id": "link3-4", "directed": False,
                      "endpoints": [port3, port4]})
        topology = Topology({
            "id": "test",
            "ports": [port1, port2, port3, port4],
            "nodes": [node1, node2, node3, node4],
            "links": [link1, link2, link3]
        })
        # Insert in the same order as before: ports, nodes, links, topology.
        for resource in (port1, port2, port3, port4,
                         node1, node2, node3, node4,
                         link1, link2, link3, topology):
            rt.insert(resource, commit=True)
def __init__(self, *args, **kwargs):
    # Ryu application constructor: reads UNIS settings from the app config,
    # opens the Runtime connection, then seeds the domain and topology
    # objects in a specific order (domain -> local topology -> host
    # topology update).  Order matters here -- do not reorder the calls.
    super(OSIRISApp, self).__init__(*args, **kwargs)
    self.mac_to_port = {}   # learned MAC address -> switch port table
    self.datapaths = {}     # datapath id -> datapath handle
    self.CONF.register_opts([
        cfg.StrOpt('unis_domain', default=''),
        cfg.StrOpt('unis_server', default='http://localhost:8888'),
        cfg.StrOpt('unis_update_interval', default='5'),
        cfg.StrOpt('unis_host', default='http://localhost:8888'),
        cfg.StrOpt('ovsdb_addr', default='"tcp:127.0.0.1:6650"')
    ], group="osiris")
    self.domain_name = self.CONF.osiris.unis_domain
    unis_server = self.CONF.osiris.unis_server
    self.ovsdb_addr = self.CONF.osiris.ovsdb_addr
    # NOTE(review): unis_server is stored twice (local and attribute); both
    # read the same config option.
    self.unis_server = self.CONF.osiris.unis_server
    self.unis_host = self.CONF.osiris.unis_host
    self.interval_secs = int(self.CONF.osiris.unis_update_interval)
    self.logger.info("----- UPDATE INTERVAL IS %d -------" % self.interval_secs)
    self.logger.info("Connecting to UNIS Server at "+unis_server)
    self.logger.info("Connecting to Domain: "+self.domain_name)
    ## UnisRT debug lines
    #trace.setLevel(lace.logging.DEBUG)
    self.logger.info("UNIS SERVER: " + str(
        self.CONF.osiris.unis_server))
    # Subscribe to change notifications but defer pushes until flush().
    self.rt = Runtime([unis_server], proxy={
        'subscribe':True,'defer_update':True} , name="main_rt")
    print(self.rt.settings['proxy'])
    self.update_time_secs = calendar.timegm(time.gmtime())
    # Transient dict of LLDP-discovered Nodes, Ports and Links which are reset every cycle
    self.alive_dict = dict()
    # Persistent dict of Switch Nodes, Ports which are not reset every cycle, modified only on OF events
    self.switches_dict = dict()
    # checks for topologies, if none, create a local topology. TODO: if domain_obj has changed, push a topology that references the new guy.
    self.logger.info("Checked domain")
    self.create_domain()
    self.logger.info("Making Topology...")
    self.instantiate_local_topology()
    self.logger.info("Attemping to Update Host Topology")
    self.check_update_host_topology()
    self.logger.info('UPDATED HOST TOPOLOGY')
    self.logger.info("Created initial RT instance")
    self.nodelist = {}
def ring_spur(url, ring, spurs):
    """Build a ring of *ring* nodes, each carrying spur nodes, into UNIS.

    Node "Node-i-0" sits on the ring; "Node-i-j" (j >= 1) is a spur hung
    off ring position i.
    """
    def label(pos, arm):
        return "Node-{0}-{1}".format(pos, arm)

    node_names = [label(i, j) for j in range(spurs) for i in range(ring)]
    # Ring edges connect consecutive positions (wrapping at the end);
    # spur edges connect each spur back to its ring node.
    ring_links = [(label(i, 0), label((i + 1) % ring, 0)) for i in range(ring)]
    spur_links = [(label(i, j), label(i, 0))
                  for j in range(1, spurs) for i in range(ring)]
    link_map = ring_links + spur_links

    with Runtime(url) as rt:
        nodes = [node(rt, nm) for nm in node_names]
        ports = [port(rt, nd) for nd in nodes]
        links = [link(rt, pair) for pair in link_map]
        ring_domain = Domain({"name": "ring-domain", "nodes": nodes,
                              "ports": ports, "links": links})
        rt.insert(ring_domain)
        topology(rt, "ring", nodes, ports, [], [ring_domain])
        commit(rt)
def __init__(self, *args, **kwargs):
    # Ryu application constructor (simpler variant): reads UNIS settings
    # from the app config, connects, creates the domain, and seeds the
    # per-cycle bookkeeping tables.
    super(OSIRISApp, self).__init__(*args, **kwargs)
    self.mac_to_port = {}   # learned MAC address -> switch port table
    self.datapaths = {}     # datapath id -> datapath handle
    self.CONF.register_opts([
        cfg.StrOpt('unis_domain', default=''),
        cfg.StrOpt('unis_server', default='http://localhost:8888'),
        cfg.StrOpt('unis_update_interval', default='30'),
    ], group="osiris")
    self.domain_name = self.CONF.osiris.unis_domain
    unis_server = self.CONF.osiris.unis_server
    self.interval_secs = int(self.CONF.osiris.unis_update_interval)
    self.logger.info("----- UPDATE INTERVAL IS %d -------" % self.interval_secs)
    self.logger.info("Connecting to UNIS Server at " + unis_server)
    self.logger.info("Connecting to Domain: " + self.domain_name)
    # No subscription stream; updates are batched until an explicit flush.
    self.rt = Runtime(unis_server, subscribe=False, defer_update=True)
    self.create_domain()
    self.update_time_secs = calendar.timegm(time.gmtime())
    # Transient dict of LLDP-discovered Nodes, Ports and Links which are reset every cycle
    self.alive_dict = dict()
    # Persistent dict of Switch Nodes, Ports which are not reset every cycle, modified only on OF events
    self.switches_dict = dict()
dev_id2name_mapping = {}

# -- UNIS runtime bootstrap (for uploading files) -----------------------------
# Try to import the UNIS runtime from either of two known module layouts and
# open a connection.  On any failure, `rt` keeps the UNIS_FAIL sentinel and
# HAVE_UNIS ends up False, so callers can degrade gracefully.
#import libdlt
UNIS_URL = 'http://localhost:9000'
HAVE_UNIS = False
UNIS_FAIL = -1  # sentinel value meaning "no UNIS runtime available"
rt = UNIS_FAIL
try:
    #from unis import Runtime # is there a difference?
    from unis.runtime import Runtime
    from unis.models import Node, schemaLoader
    from unis.models import Metadata
    rt = Runtime(UNIS_URL)
# FIX: was a bare `except:`, which also swallows KeyboardInterrupt and
# SystemExit; `except Exception` keeps the deliberate best-effort behavior
# (import errors AND connection errors fall through to the alternative).
except Exception:
    # possible alternative, depending on the environment
    # (Ubuntu 16.04 via Windows Subsystem for Linux)
    try:
        #sys.path.append('/home/minion/repobin/Lace') # if needed
        from unis import Runtime
        from unis.models import Node, schemaLoader
        from unis.models import Metadata
        rt = Runtime(UNIS_URL)
    except Exception:
        pass
HAVE_UNIS = rt != UNIS_FAIL
# borrowed this from the ferry code, written by Jeremy Musser and Dr. Ezra Kissel,
return None def check_link_in_unis(self, link): link_name = link.name print("CHECKING FOR LINK ", link_name) for link in self.rt.links: if link.name == link_name: return item print("LINK NOT FOUND IN UNIS", link_name) return None def show_resources(self): pp.pprint(self.nodes) pp.pprint(self.ports) pp.pprint(self.links) ############################################################################################# ''' Main defined for convenient testing. And Usage. ''' if __name__ == "__main__": rt = Runtime('http://msu-ps01.osris.org:8888', subscribe=False, defer_update=True) SRB = StaticResourceBuilder('config/static_resources.ini', rt) SRB.manifest() SRB.show_resources() print("Done writing static resources)
import argparse
import json
import sys  # FIX: was missing, yet sys.path.append is used in the fallback
            # below -- the resulting NameError was silently eaten by the
            # bare except, so the fallback could never succeed.

from colorama import init, Fore, Back, Style
import requests

# bear in mind the importation of these modules will be executed above the
# modules' containing directory
import bridge

if bridge.HAVE_UNIS:
    try:
        #from unis import Runtime # is there a difference?
        from unis.runtime import Runtime
        from unis.models import Node, schemaLoader
        from unis.models import Metadata
        rt = Runtime('http://localhost:9000')
        print('able to import everything!')
    # FIX: bare `except:` also swallows KeyboardInterrupt/SystemExit; keep
    # the deliberate best-effort behavior with `except Exception` (both
    # import errors and connection errors fall through to the alternative).
    except Exception:
        # possible alternative, depending on the environment
        # (Ubuntu 16.04 via Windows Subsystem for Linux)
        try:
            sys.path.append('/home/minion/repobin/Lace')
            sys.path.append('/home/minion/repobin/UNISrt')
            # once more with less fail?
            from unis import Runtime
            #from unis.runtime import Runtime
            from unis.models import Node, schemaLoader
            from unis.models import Metadata
            rt = Runtime('http://localhost:9000')
            print('able to import everything!')
        except Exception:
            pass
def check_update_host_topology(self):
    '''
    Checks the current osiris.json-esque topology on the host node, and updates the associated hrefs to match
    the current switch's domain href. Also updates the link in the host that connects the topology to ChicPOP.
    '''
    self.logger.info("UNIS HOST: " + str(self.unis_host))
    host_rt = Runtime([self.unis_host], name="remote")
    # we are going to update the 'main' topology based on the what is in the configuration file
    topology = host_rt.topologies[0]  # the first topology instance is the most recent and AFAIK the one we want
    topology_dict = topology.to_JSON()  # this is how we get around the Runtime essentially sandboxing us, treat JSON as a dict.
    href_list = []  # create something to store the hrefs we are about to gather
    for i in range(0, len(topology.domains)):  # iterate with respect to key on each domain, test against that href
        domain_href = topology_dict['domains'][i]['href']
        self.logger.info("Finding HREF" + str(domain_href))
        href_list.append(domain_href)
    match = None  # instantiate something to store the href if we hit a match

    def clean_up(topology):
        # Remove duplicate (and broken) copies of the local domain from the
        # remote topology, then commit and flush the cleaned list back to
        # the host runtime.  Closes over self and host_rt.
        domain_exists = False  # once we see a domain once we should remove copies of it.
        self.logger.info("Finishing Up startup, cleaning up topologies.")
        new_domains = []
        for domain in topology.domains:
            try:
                temp_name = domain.name
                if temp_name == self.domain_obj.name and domain_exists == False:
                    self.logger.info("Ensured instance of local domain in remote topology")
                    new_domains.append(domain)
                    domain_exists = True
                elif temp_name == self.domain_obj.name and domain_exists == True:
                    self.logger.info("Found Duplicate of local domain obj in remote topology, deleting..")
                    topology.domains.remove(domain)
                    #self.logger.info(topology.domains.to_JSON())
                else:
                    new_domains.append(domain)
            except:
                # NOTE(review): bare except -- any entry whose .name lookup
                # fails is treated as broken and dropped from the topology.
                self.logger.info("Delete Broken Domain")
                topology.domains.remove(domain)
        topology.domains = new_domains
        topology.commit()
        host_rt.flush()
        return

    for index, href in enumerate(href_list):  # time to sift through the different unis instances
        unis_href = href.split('8888', 1)[0] + '8888'  # regex here?, TODO?
        self.logger.info("TESTING OUT " + str(unis_href))
        current_rt = Runtime([unis_href], name="current" + str(index))
        try:
            #most_recent_domain = next(current_rt.domains.where({"name":self.domain_obj.name}))
            most_recent_domain = current_rt.domains[0]
            self.logger.info("Comparing " + str(self.domain_obj.name) + " with " + str(most_recent_domain.name))
            # KEY: production switches now need to properly set the unis_domain setting in the config file from now on
            if self.domain_obj.name == most_recent_domain.name:
                self.logger.info("Found current matching domain in UNIS Host...")
                match = unis_href
                topology.domains[index] = most_recent_domain
                host_rt.flush()  # not sure if this is necessary, will experiment
                self.logger.info("\nDomain: " + str(self.domain_obj.name) + ", updated domain object successfully at " + str(
                    topology.selfRef) + " with href - " + str(href) + "\n")
                topology.commit()
                self.logger.info("Flushing change to Host RT " + self.unis_host)
                host_rt.flush()
                link = ''
                try:
                    # update the link as well
                    link_name = "link-" + self.domain_obj.name + "-CHIC"  # string - 'link-UM-CHIC'
                    self.logger.info("TESTING AGAINST LINK NAME: " + link_name)
                    # Boolean mask over topology.links marking name matches.
                    link_map = list(map(lambda link: link.name == link_name, topology.links))
                    self.logger.info("Link Map - " + str(link_map))
                    for key, l in enumerate(topology.links):
                        if link_map[key] == True:
                            print(link_map[key])
                            link = l
                            link.endpoints[0] = most_recent_domain
                            self.logger.info('Verified the link to this domain.\n')
                    if link == '' or topology.links == []:  # no link was found, add it to the topology
                        self.logger.info("No link found for this domain, creating and adding it to host topology...")
                        new_link = Link({"name": link_name, "directed": False,
                                         "endpoints": [most_recent_domain, {"href": "$.domains[?(@.name==\"CHIC PoP\")]", "rel": "full"}]})
                        topology.links.append(new_link)
                        host_rt.flush()
                        self.logger.info("Generated new link to the current domain.\n")
                except Exception:
                    print("EXCEPTION")
                    self.logger.info('Could not update interdomain link.')
        except Exception as e:
            self.logger.exception("Exception: ")
            self.logger.info("Domain not found, deleting from topology entry")
            self.logger.info("Domain index: " + str(index) + " | HREF: " + href)
            self.logger.info('Trouble Updating Unis Host Topology... Continuing')
    if match is None:
        # TODO: occurs if no match was found, if so then add it to the topology, not sure if would work correctly in this object though..
        self.logger.info('No match found for: ' + str(self.domain_obj.name) + ', adding domain to host site, ' + str(
            topology.selfRef))
        # not sure how to go about this since a we are not pushing the remote object to the host but instead 'updating' it.
        new_domain = self.domain_obj
        topology.domains.append(new_domain)
        topology.commit()
        host_rt.flush()
    clean_up(topology)
    return
def cached_connection(cls, source):
    """Return the cached Runtime for *source*, creating one on first use.

    FIX: the original used ``cls._runtime_cache.get(source, Runtime(source))``,
    but ``dict.get`` evaluates its default argument eagerly, so a brand-new
    Runtime (with its connection setup) was constructed on EVERY call --
    even on a cache hit -- defeating the purpose of the cache.  Now the
    Runtime is constructed only on a cache miss.

    :param source: UNIS endpoint used as the cache key.
    :return: the cached (or newly created) ``Runtime``.
    """
    if source not in cls._runtime_cache:
        cls._runtime_cache[source] = Runtime(source)
    return cls._runtime_cache[source]