Example #1
File: network.py Project: Jeswang/icarus
    def __init__(self, topology, shortest_path=None):
        """Constructors
        
        Parameters
        ----------
        topology : fnss.Topology
            The topology object
        shortest_path : dict of dict, optional
            The all-pair shortest paths of the network
        """
        # Filter inputs
        if not isinstance(topology, fnss.Topology):
            raise ValueError('The topology argument must be an instance of '
                             'fnss.Topology or any of its subclasses.')
        
        # Shortest paths of the network
        self.shortest_path = shortest_path if shortest_path is not None \
                             else nx.all_pairs_shortest_path(topology)
        
        # Network topology
        self.topology = topology
        
        # Dictionary mapping each content object to its source
        # dict of location of contents keyed by content ID
        self.content_source = {}
        
        # Dictionary of cache sizes keyed by node
        self.cache_size = {}
        
        # Per-node collaboration tables (one empty dict per caching node,
        # populated in the loop below)
        self.colla_table = {}

        # Dictionary of link types (internal/external)
        self.link_type = nx.get_edge_attributes(topology.to_directed(), 'type')
        
        self.link_delay = fnss.get_delays(topology.to_directed())
        
        policy_name = topology.graph['cache_policy']
        # Initialize attributes
        for node in topology.nodes_iter():
            stack_name, stack_props = fnss.get_stack(topology, node)
            if stack_name == 'cache':
                self.cache_size[node] = stack_props['size']
                self.colla_table[node] = {}
            elif stack_name == 'source':
                contents = stack_props['contents']
                for content in contents:
                    self.content_source[content] = node
        # The actual cache objects storing the content
        self.caches = dict((node, cache_policy_register[policy_name](self.cache_size[node]))
                           for node in self.cache_size)
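
A minimal usage sketch for this constructor (hedged: it assumes the enclosing class is named NetworkModel and that 'LRU' is a key in cache_policy_register, neither of which appears in the snippet):

import fnss

# Build a 3-node line topology: one source holding contents 1-3, two caches
topo = fnss.line_topology(3)
topo.graph['cache_policy'] = 'LRU'   # assumed to be registered in cache_policy_register
fnss.add_stack(topo, 0, 'source', {'contents': [1, 2, 3]})
fnss.add_stack(topo, 1, 'cache', {'size': 10})
fnss.add_stack(topo, 2, 'cache', {'size': 10})
model = NetworkModel(topo)           # hypothetical class name
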
Example #2
    def __init__(self, topology, cache_policy, shortest_path=None):
        """Constructor

        Parameters
        ----------
        topology : fnss.Topology
            The topology object
        cache_policy : dict or Tree
            cache policy descriptor. It has a name attribute identifying the
            cache policy and keyword arguments specific to the policy
        shortest_path : dict of dict, optional
            The all-pair shortest paths of the network
        """
        # Filter inputs
        if not isinstance(topology, fnss.Topology):
            raise ValueError('The topology argument must be an instance of '
                             'fnss.Topology or any of its subclasses.')

        # Shortest paths of the network
        self.shortest_path = shortest_path if shortest_path is not None \
                             else symmetrify_paths(nx.all_pairs_dijkstra_path(topology))

        # Network topology
        self.topology = topology

        # Dictionary mapping each content object to its source
        # dict of location of contents keyed by content ID
        self.content_source = {}
        # Dictionary mapping the reverse, i.e. nodes to set of contents stored
        self.source_node = {}

        # Dictionary of link types (internal/external)
        self.link_type = nx.get_edge_attributes(topology, 'type')
        self.link_delay = fnss.get_delays(topology)
        # Instead of this manual assignment, I could have converted the
        # topology to directed before extracting type and link delay but that
        # requires a deep copy of the topology that can take long time if
        # many content source mappings are included in the topology
        if not topology.is_directed():
            for (u, v), link_type in list(self.link_type.items()):
                self.link_type[(v, u)] = link_type
            for (u, v), delay in list(self.link_delay.items()):
                self.link_delay[(v, u)] = delay

        cache_size = {}
        for node in topology.nodes_iter():
            stack_name, stack_props = fnss.get_stack(topology, node)
            if stack_name in ('router', 'receiver'):
                if 'cache_size' in stack_props:
                    cache_size[node] = stack_props['cache_size']
            elif stack_name == 'source':
                if 'contents' in stack_props:
                    contents = stack_props['contents']
                    self.source_node[node] = contents
                    for content in contents:
                        self.content_source[content] = node
        if any(c < 1 for c in cache_size.values()):
            logger.warning('Some content caches have size equal to 0. '
                           'Setting them to 1 and running the experiment anyway.')
            for node in cache_size:
                if cache_size[node] < 1:
                    cache_size[node] = 1

        policy_name = cache_policy['name']
        policy_args = {k: v for k, v in cache_policy.items() if k != 'name'}
        # The actual cache objects storing the content
        self.cache = {
            node: CACHE_POLICY[policy_name](cache_size[node], **policy_args)
            for node in cache_size
        }

        # Content-Download Information (Key=content, Value=number of downloads)
        self.central_download_table = collections.Counter()
        self.user_download_table = collections.Counter()

        # Content-Caching Information (Key=content, Value=number of times being cached)
        self.central_cache_table = collections.Counter()
        self.user_cache_table = collections.Counter()

        # This is for a local un-coordinated cache (currently used only by
        # Hashrouting with edge cache)
        self.local_cache = {}

        # Keep track of nodes and links removed to simulate failures
        self.removed_nodes = {}
        # This keeps track of neighbors of a removed node at the time of removal.
        # It is needed to ensure that when the node is restored only links that
        # were removed as part of the node removal are restored and to prevent
        # restoring nodes that were removed manually before removing the node.
        self.disconnected_neighbors = {}
        self.removed_links = {}
        self.removed_sources = {}
        self.removed_caches = {}
        self.removed_local_caches = {}
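
symmetrify_paths itself is not shown in these snippets; a plausible sketch, consistent with the call above, forces each path to be the reverse of the opposite direction's path so that routing is symmetric:

def symmetrify_paths(shortest_paths):
    # Make paths symmetric: path(u, v) becomes the reverse of path(v, u)
    for u in shortest_paths:
        for v in shortest_paths[u]:
            shortest_paths[u][v] = list(reversed(shortest_paths[v][u]))
    return shortest_paths
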
Example #3
 def __init__(self, topology, cache_policy, shortest_path=None):
     """Constructor
     
     Parameters
     ----------
     topology : fnss.Topology
         The topology object
     cache_policy : dict or Tree
         cache policy descriptor. It has a name attribute identifying the
         cache policy and keyword arguments specific to the policy
     shortest_path : dict of dict, optional
         The all-pair shortest paths of the network
     """
     # Filter inputs
     if not isinstance(topology, fnss.Topology):
         raise ValueError('The topology argument must be an instance of '
                          'fnss.Topology or any of its subclasses.')
     
     # Shortest paths of the network
     self.shortest_path = shortest_path if shortest_path is not None \
                          else symmetrify_paths(nx.all_pairs_dijkstra_path(topology))
     
     # Network topology
     self.topology = topology
     
     # Dictionary mapping each content object to its source
     # dict of location of contents keyed by content ID
     self.content_source = {}
     
     # Dictionary of cache sizes keyed by node
     self.cache_size = {}
     
     # Dictionary of link types (internal/external)
     self.link_type = nx.get_edge_attributes(topology, 'type')
     self.link_delay = fnss.get_delays(topology)
     # Instead of this manual assignment, I could have converted the
     # topology to directed before extracting type and link delay but that
     # requires a deep copy of the topology that can take long time if
     # many content source mappings are included in the topology
     if not topology.is_directed():
         for (u, v), link_type in self.link_type.items():
             self.link_type[(v, u)] = link_type
         for (u, v), delay in self.link_delay.items():
             self.link_delay[(v, u)] = delay
             
     # Initialize attributes
     for node in topology.nodes_iter():
         stack_name, stack_props = fnss.get_stack(topology, node)
         if stack_name == 'router':
             if 'cache_size' in stack_props:
                 self.cache_size[node] = stack_props['cache_size']
         elif stack_name == 'source':
             contents = stack_props['contents']
             for content in contents:
                 self.content_source[content] = node
     if any(c < 1 for c in self.cache_size.values()):
          logger.warning('Some content caches have size equal to 0. '
                         'Setting them to 1 and running the experiment anyway.')
          for node in self.cache_size:
              if self.cache_size[node] < 1:
                  self.cache_size[node] = 1
                 
     policy_name = cache_policy['name']
     policy_args = {k: v for k, v in cache_policy.items() if k != 'name'}
     # The actual cache objects storing the content
     self.cache = {node: CACHE_POLICY[policy_name](self.cache_size[node], **policy_args)
                       for node in self.cache_size}
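
For reference, the cache_policy descriptor consumed above splits into a policy name and policy-specific keyword arguments. A hedged illustration (the 'LRU' key is an assumption about what CACHE_POLICY registers):

cache_policy = {'name': 'LRU'}
# -> policy_name = 'LRU', policy_args = {}
# -> self.cache[node] = CACHE_POLICY['LRU'](self.cache_size[node])
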
Example #4
    def __init__(self, topology, cache_policy, n_services, rate, seed=0, shortest_path=None):
        """Constructor

        Parameters
        ----------
        topology : fnss.Topology
            The topology object
        cache_policy : dict or Tree
            cache policy descriptor. It has a name attribute identifying the
            cache policy and keyword arguments specific to the policy
        n_services : int
            Number of services to generate
        rate : float
            Rate at which service requests arrive
        seed : int, optional
            Seed for the random number generator
        shortest_path : dict of dict, optional
            The all-pair shortest paths of the network
        """
        # Filter inputs
        if not isinstance(topology, fnss.Topology):
            raise ValueError('The topology argument must be an instance of '
                             'fnss.Topology or any of its subclasses.')

        # Shortest paths of the network
        self.shortest_path = shortest_path if shortest_path is not None \
                             else symmetrify_paths(nx.all_pairs_dijkstra_path(topology))

        # Network topology
        self.topology = topology

        # Dictionary mapping each content object to its source
        # dict of location of contents keyed by content ID
        self.content_source = {}
        # Dictionary mapping the reverse, i.e. nodes to set of contents stored
        self.source_node = {}

        # A heap with events (see Event class above)
        self.eventQ = []

        # Dictionary of link types (internal/external)
        self.link_type = nx.get_edge_attributes(topology, 'type')
        self.link_delay = fnss.get_delays(topology)
        # Instead of this manual assignment, I could have converted the
        # topology to directed before extracting type and link delay but that
        # requires a deep copy of the topology that can take long time if
        # many content source mappings are included in the topology
        if not topology.is_directed():
            for (u, v), link_type in list(self.link_type.items()):
                self.link_type[(v, u)] = link_type
            for (u, v), delay in list(self.link_delay.items()):
                self.link_delay[(v, u)] = delay

        cache_size = {}
        comp_size = {}
        for node in topology.nodes_iter():
            stack_name, stack_props = fnss.get_stack(topology, node)
            if stack_name == 'router':
                if 'cache_size' in stack_props:
                    cache_size[node] = stack_props['cache_size']
                if 'computation_size' in stack_props:
                    comp_size[node] = stack_props['computation_size']
            elif stack_name == 'source' and 'contents' in stack_props:
                contents = stack_props['contents']
                self.source_node[node] = contents
                # A content may be hosted by multiple sources, so map each
                # content to the list of nodes that host it
                for content in contents:
                    if content not in self.content_source:
                        self.content_source[content] = [node]
                    else:
                        self.content_source[content].append(node)
        if any(c < 1 for c in cache_size.values()):
            logger.warning('Some content caches have size equal to 0. '
                           'Setting them to 1 and running the experiment anyway.')
            for node in cache_size:
                if cache_size[node] < 1:
                    cache_size[node] = 1

        # Generate the actual services processing requests
        self.services = []
        self.n_services = n_services
        internal_link_delay = 0.002 # This is the delay from receiver to router
        
        """
        # Hardcoded generation (for testing): 

        # Service 0 (not used)
        service_time = 0.5
        deadline = 0.5+0.040 + 2*internal_link_delay
        s = Service(service_time, deadline)
        self.services.append(s)
        # Service 1:
        service_time = 0.5
        deadline = 0.5 + 0.035 + 2*internal_link_delay
        s = Service(service_time, deadline)
        self.services.append(s)
        # Service 2:
        service_time = 0.5
        deadline = 0.5+0.030 + 2*internal_link_delay
        s = Service(service_time, deadline)
        self.services.append(s)
        # Service 3:
        service_time = 0.5
        deadline = 0.5+0.025 + 2*internal_link_delay
        s = Service(service_time, deadline)
        self.services.append(s)
        # Service 4:
        service_time = 0.5
        deadline = 0.5+0.020 + 2*internal_link_delay
        s = Service(service_time, deadline)
        self.services.append(s)
        # Service 5:
        service_time = 0.5
        deadline = 0.5+0.015 + 2*internal_link_delay
        s = Service(service_time, deadline)
        self.services.append(s)
        # Service 6:
        service_time = 0.5
        deadline = 0.5+0.015 + 2*internal_link_delay
        s = Service(service_time, deadline)
        self.services.append(s)
        # Service 7:
        service_time = 0.5
        deadline = 0.5+0.015 + 2*internal_link_delay
        s = Service(service_time, deadline)
        self.services.append(s)
        # Service 8:
        service_time = 0.5
        deadline = 0.5+0.010 + 2*internal_link_delay
        s = Service(service_time, deadline)
        self.services.append(s)
        # Service 9:
        
       
        indx=0
        for service in self.services:
            if indx is 0:
                indx+=1
                continue
            print "Service: " + repr(indx) + " Deadline: " +repr(self.services[indx].deadline) + " Service Time: " + repr(self.services[indx].service_time)
            indx+=1
        """
        
        #""" GENERATE Services automatically using min, max ranges for service times and deadlines
        service_time_min = 0.1
        service_time_max = 0.1
        delay_min = 0.005
        delay_max = 0.05 #0.015*2+0.005*4


        service_indx = 0
        deadlines = []
        service_times = []
        random.seed(seed)

        for service in range(0, n_services):
            source_list = self.content_source[service]
            service_time = random.uniform(service_time_min, service_time_max)/len(source_list)
            deadline = service_time + random.uniform(delay_min, delay_max) + 2*internal_link_delay
            deadlines.append(deadline)
            service_times.append(service_time)

        for service in range(0, n_services):
            service_time = service_times[service_indx]
            deadline = deadlines[service_indx]
            service_indx += 1
            s = Service(service_time, deadline)
            self.services.append(s)

        self.compSpot = {node: ComputationalSpot(comp_size[node], n_services, self.services, node, None)
                         for node in comp_size}

        policy_name = cache_policy['name']
        policy_args = {k: v for k, v in cache_policy.items() if k != 'name'}
        # The actual cache objects storing the content
        self.cache = {node: CACHE_POLICY[policy_name](cache_size[node], **policy_args)
                          for node in cache_size}

        # This is for a local un-coordinated cache (currently used only by
        # Hashrouting with edge cache)
        self.local_cache = {}


        # Keep track of nodes and links removed to simulate failures
        self.removed_nodes = {}
        # This keeps track of neighbors of a removed node at the time of removal.
        # It is needed to ensure that when the node is restored only links that
        # were removed as part of the node removal are restored and to prevent
        # restoring nodes that were removed manually before removing the node.
        self.disconnected_neighbors = {}
        self.removed_links = {}
        self.removed_sources = {}
        self.removed_caches = {}
        self.removed_local_caches = {}
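
The Service class is not included in these snippets; a minimal stand-in consistent with its use above (only the two attributes set here are ever read):

class Service(object):
    def __init__(self, service_time, deadline):
        # Execution time of the service and the latest acceptable completion time
        self.service_time = service_time
        self.deadline = deadline
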
Example #5
    def __init__(self, topology, cache_policy, shortest_path=None):
        """Constructor
        
        Parameters
        ----------
        topology : fnss.Topology
            The topology object
        cache_policy : dict or Tree
            cache policy descriptor. It has a name attribute identifying the
            cache policy and keyword arguments specific to the policy
        shortest_path : dict of dict, optional
            The all-pair shortest paths of the network
        """
        # Filter inputs
        if not isinstance(topology, fnss.Topology):
            raise ValueError('The topology argument must be an instance of '
                             'fnss.Topology or any of its subclasses.')

        # Shortest paths of the network
        self.shortest_path = shortest_path if shortest_path is not None \
                             else symmetrify_paths(nx.all_pairs_dijkstra_path(topology))

        # Network topology
        self.topology = topology

        # Dictionary mapping each content object to its source
        # dict of location of contents keyed by content ID
        self.content_source = {}

        # Dictionary of cache sizes keyed by node
        self.cache_size = {}

        # Dictionary of link types (internal/external)
        self.link_type = nx.get_edge_attributes(topology, 'type')
        self.link_delay = fnss.get_delays(topology)
        # Instead of this manual assignment, I could have converted the
        # topology to directed before extracting type and link delay but that
        # requires a deep copy of the topology that can take long time if
        # many content source mappings are included in the topology
        if not topology.is_directed():
            for (u, v), link_type in self.link_type.items():
                self.link_type[(v, u)] = link_type
            for (u, v), delay in self.link_delay.items():
                self.link_delay[(v, u)] = delay

        # Initialize attributes
        for node in topology.nodes_iter():
            stack_name, stack_props = fnss.get_stack(topology, node)
            if stack_name == 'router':
                if 'cache_size' in stack_props:
                    self.cache_size[node] = stack_props['cache_size']
                #print "router node:"
                #print node
            elif stack_name == 'source':
                #print "cache node application's name:"
                #print fnss.get_application_names(topology, node)
                #print "content node:"
                #print node
                contents = stack_props['contents']
                self.cache_size[node] = 1
                for content in contents:
                    self.content_source[content] = node
        if any(c < 1 for c in self.cache_size.values()):
            logger.warning('Some content caches have size equal to 0. '
                           'Setting them to 1 and running the experiment anyway.')
            for node in self.cache_size:
                if self.cache_size[node] < 1:
                    self.cache_size[node] = 1

        policy_name = cache_policy['name']
        policy_args = {k: v for k, v in cache_policy.items() if k != 'name'}
        # The actual cache objects storing the content
        self.cache = {
            node: CACHE_POLICY[policy_name](self.cache_size[node],
                                            **policy_args)
            for node in self.cache_size
        }

        #print "all the actual cache objects storing the contents"
        #for node in self.cache:
        #    print (node, self.cache_size[node])
        # Store each caching node's neighbours as a dictionary of lists,
        # looked up via networkx
        self.neighbours = {}
        for n in self.cache:
            self.neighbours[n] = list(nx.neighbors(topology, n))
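
A hedged sketch of how the neighbours map might be used, assuming the cache objects expose an icarus-style has(content) lookup (an assumption about CACHE_POLICY instances):

def neighbour_has_content(model, node, content):
    # True if any caching neighbour of `node` already stores `content`
    return any(v in model.cache and model.cache[v].has(content)
               for v in model.neighbours[node])
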
Example #6
File: jfed.py Project: wangxi0414/fnss
def to_jfed(topology,
            path,
            testbed="wall1.ilabt.iminds.be",
            encoding="utf-8",
            prettyprint=True):
    """Convert a topology object into an RSPEC file for jFed
    
    Parameters
    ----------
    topology : Topology
        The topology object
    path : str
        The file to which the RSPEC will be written
    testbed : str, optional
        URI of the testbed to use
    encoding : str, optional
        The encoding of the target file
    prettyprint : bool, optional
        Indent the XML code in the output file
        
    Notes
    -----
    It currently supports only undirected topologies; if a topology is
    directed, it is converted to undirected.
    """
    if topology.is_directed():
        topology = topology.to_undirected()
    topology = nx.convert_node_labels_to_integers(topology)

    if 'capacity_unit' in topology.graph:
        capacity_norm = units.capacity_units[topology.graph['capacity_unit']] / units.capacity_units['Kbps']
    if 'delay_unit' in topology.graph:
        delay_norm = units.time_units[topology.graph['delay_unit']] / units.time_units['ms']
    delays = get_delays(topology)
    capacities = get_capacities(topology)
    # Node positions (randomly generated)
    pos = nx.random_layout(topology)
    # Create mapping between links and interface IDs
    if_names = {}
    for v in topology.edge:
        next_hops = sorted(topology.edge[v].keys())
        if_names[v] = dict((next_hops[i], i) for i in range(len(next_hops)))
    head = ET.Element('rspec')
    head.attrib["generated_by"] = "FNSS"
    head.attrib['xsi:schemaLocation'] = "http://www.geni.net/resources/rspec/3 http://www.geni.net/resources/rspec/3/request.xsd"
    head.attrib['xmlns'] = "http://www.geni.net/resources/rspec/3"
    head.attrib["xmlns:jFed"] = "http://jfed.iminds.be/rspec/ext/jfed/1"
    head.attrib["xmlns:jFedBonfire"] = "http://jfed.iminds.be/rspec/ext/jfed-bonfire/1"
    head.attrib["xmlns:delay"] = "http://www.protogeni.net/resources/rspec/ext/delay/1"
    head.attrib["xmlns:xsi"] = "http://www.w3.org/2001/XMLSchema-instance"
    # Iterate over nodes
    for v in topology.nodes_iter():
        node = ET.SubElement(head, 'node')
        node.attrib['client_id'] = "node%s" % str(v)
        node.attrib['component_manager_id'] = "urn:publicid:IDN+%s+authority+cm" % testbed
        node.attrib["exclusive"] = "true"
        sliver_type = ET.SubElement(node, 'sliver_type')
        sliver_type.attrib['name'] = topology.node[v]['sliver_type'] if 'sliver_type' in topology.node[v] else 'raw-pc'
        location = ET.SubElement(node, 'jFed:location')
        x, y = pos[v]
        location.attrib['x'] = str(1000 * x)
        location.attrib['y'] = str(500 * y)
        for if_name in if_names[v].values():
            interface = ET.SubElement(node, 'interface')
            interface.attrib['client_id'] = "node%s:if%s" % (str(v), str(if_name))
    # The convention in jFed is to identify links with "linkX" where X is an
    # integer but making sure that links and nodes have different integers
    link_id = topology.number_of_nodes() - 1
    for u, v in topology.edges_iter():
        link_id += 1
        link = ET.SubElement(head, 'link')
        link.attrib['client_id'] = "link%s" % str(link_id)
        component_manager = ET.SubElement(link, 'component_manager')
        component_manager.attrib['name'] = "urn:publicid:IDN+%s+authority+cm" % testbed
        u_if = "node%s:if%s" % (str(u), str(if_names[u][v]))
        v_if = "node%s:if%s" % (str(v), str(if_names[v][u]))
        for source, dest in ((u_if, v_if), (v_if, u_if)):
            prop = ET.SubElement(link, 'property')
            prop.attrib["source_id"] = source
            prop.attrib["dest_id"] = dest
            if (u, v) in delays:
                prop.attrib['latency'] = str(delay_norm * delays[(u, v)])
            if (u, v) in capacities:
                prop.attrib['capacity'] = str(capacity_norm * capacities[(u, v)])
            interface_ref = ET.SubElement(link, 'interface_ref')
            interface_ref.attrib['client_id'] = source
    if prettyprint:
        util.xml_indent(head)
    ET.ElementTree(head).write(path, encoding=encoding)
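
A possible invocation, assuming an fnss topology annotated with delays and capacities so that delay_norm and capacity_norm are both defined:

import fnss

topo = fnss.star_topology(4)
fnss.set_delays_constant(topo, 2, 'ms')
fnss.set_capacities_constant(topo, 100, 'Mbps')
to_jfed(topo, 'topology.rspec')
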
Example #7
File: jfed.py Project: fnss/fnss
def to_jfed(topology, path, testbed="wall1.ilabt.iminds.be", encoding="utf-8", prettyprint=True):
    """Convert a topology object into an RSPEC file for jFed

    Parameters
    ----------
    topology : Topology
        The topology object
    path : str
        The file to which the RSPEC will be written
    testbed : str, optional
        URI of the testbed to use
    encoding : str, optional
        The encoding of the target file
    prettyprint : bool, optional
        Indent the XML code in the output file

    Notes
    -----
    It currently supports only undirected topologies; if a topology is
    directed, it is converted to undirected.
    """
    if topology.is_directed():
        topology = topology.to_undirected()
    topology = nx.convert_node_labels_to_integers(topology)

    if 'capacity_unit' in topology.graph:
        capacity_norm = units.capacity_units[topology.graph['capacity_unit']] / units.capacity_units['Kbps']
    if 'delay_unit' in topology.graph:
        delay_norm = units.time_units[topology.graph['delay_unit']] / units.time_units['ms']
    delays = get_delays(topology)
    capacities = get_capacities(topology)
    # Node positions (randomly generated)
    pos = nx.random_layout(topology)
    # Create mapping between links and interface IDs
    if_names = {}
    for v in topology.adj:
        next_hops = sorted(topology.adj[v].keys())
        if_names[v] = {next_hop: i for i, next_hop in enumerate(next_hops)}
    head = ET.Element('rspec')
    head.attrib["generated_by"] = "FNSS"
    head.attrib['xsi:schemaLocation'] = "http://www.geni.net/resources/rspec/3 http://www.geni.net/resources/rspec/3/request.xsd"
    head.attrib['xmlns'] = "http://www.geni.net/resources/rspec/3"
    head.attrib["xmlns:jFed"] = "http://jfed.iminds.be/rspec/ext/jfed/1"
    head.attrib["xmlns:jFedBonfire"] = "http://jfed.iminds.be/rspec/ext/jfed-bonfire/1"
    head.attrib["xmlns:delay"] = "http://www.protogeni.net/resources/rspec/ext/delay/1"
    head.attrib["xmlns:xsi"] = "http://www.w3.org/2001/XMLSchema-instance"
    # Iterate over nodes
    for v in topology.nodes():
        node = ET.SubElement(head, 'node')
        node.attrib['client_id'] = "node%s" % str(v)
        node.attrib['component_manager_id'] = "urn:publicid:IDN+%s+authority+cm" % testbed
        node.attrib["exclusive"] = "true"
        sliver_type = ET.SubElement(node, 'sliver_type')
        sliver_type.attrib['name'] = topology.node[v]['sliver_type'] if 'sliver_type' in topology.node[v] else 'raw-pc'
        location = ET.SubElement(node, 'jFed:location')
        x, y = pos[v]
        location.attrib['x'] = str(1000 * x)
        location.attrib['y'] = str(500 * y)
        for if_name in if_names[v].values():
            interface = ET.SubElement(node, 'interface')
            interface.attrib['client_id'] = "node%s:if%s" % (str(v), str(if_name))
    # The convention in jFed is to identify links with "linkX" where X is an
    # integer but making sure that links and nodes have different integers
    link_id = topology.number_of_nodes() - 1
    for u, v in topology.edges():
        link_id += 1
        link = ET.SubElement(head, 'link')
        link.attrib['client_id'] = "link%s" % str(link_id)
        component_manager = ET.SubElement(link, 'component_manager')
        component_manager.attrib['name'] = "urn:publicid:IDN+%s+authority+cm" % testbed
        u_if = "node%s:if%s" % (str(u), str(if_names[u][v]))
        v_if = "node%s:if%s" % (str(v), str(if_names[v][u]))
        for source, dest in ((u_if, v_if), (v_if, u_if)):
            prop = ET.SubElement(link, 'property')
            prop.attrib["source_id"] = source
            prop.attrib["dest_id"] = dest
            if (u, v) in delays:
                prop.attrib['latency'] = str(delay_norm * delays[(u, v)])
            if (u, v) in capacities:
                prop.attrib['capacity'] = str(capacity_norm * capacities[(u, v)])
            interface_ref = ET.SubElement(link, 'interface_ref')
            interface_ref.attrib['client_id'] = source
    if prettyprint:
        util.xml_indent(head)
    ET.ElementTree(head).write(path, encoding=encoding)
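
The logic here is identical to Example #6; the differences are NetworkX 2.x API updates:

# NetworkX 1.x (Example #6)   ->  NetworkX 2.x (this example)
# topology.edge[v]            ->  topology.adj[v]
# topology.nodes_iter()       ->  topology.nodes()
# topology.edges_iter()       ->  topology.edges()
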
Example #8
    def __init__(self,
                 topology,
                 cache_policy,
                 sched_policy,
                 n_services,
                 rate,
                 seed=0,
                 shortest_path=None):
        """Constructor

        Parameters
        ----------
        topology : fnss.Topology
            The topology object
        cache_policy : dict or Tree
            cache policy descriptor. It has a name attribute identifying the
            cache policy and keyword arguments specific to the policy
        sched_policy : object
            Scheduling policy passed to each ComputationSpot
        n_services : int
            Number of services to generate
        rate : float
            Rate at which service requests arrive
        seed : int, optional
            Seed for the random number generator
        shortest_path : dict of dict, optional
            The all-pair shortest paths of the network
        """
        # Filter inputs
        if not isinstance(topology, fnss.Topology):
            raise ValueError('The topology argument must be an instance of '
                             'fnss.Topology or any of its subclasses.')

        # Shortest paths of the network
        self.shortest_path = shortest_path if shortest_path is not None \
                             else symmetrify_paths(nx.all_pairs_dijkstra_path(topology))

        # Network topology
        self.topology = topology
        self.topology_depth = 0

        # Dictionary mapping each content object to its source
        # dict of location of contents keyed by content ID
        self.content_source = {}
        # Dictionary mapping the reverse, i.e. nodes to set of contents stored
        self.source_node = {}

        # A heap with events (see Event class above)
        self.eventQ = []

        # Dictionary of link types (internal/external)
        self.link_type = nx.get_edge_attributes(topology, 'type')
        self.link_delay = fnss.get_delays(topology)
        # Instead of this manual assignment, I could have converted the
        # topology to directed before extracting type and link delay but that
        # requires a deep copy of the topology that can take long time if
        # many content source mappings are included in the topology
        if not topology.is_directed():
            for (u, v), link_type in list(self.link_type.items()):
                self.link_type[(v, u)] = link_type
            for (u, v), delay in list(self.link_delay.items()):
                self.link_delay[(v, u)] = delay

        cache_size = {}
        comp_size = {}
        service_size = {}
        self.rate = rate
        for node in topology.nodes_iter():
            stack_name, stack_props = fnss.get_stack(topology, node)
            # get the depth of the tree (note: the attribute lives in the node
            # dict, not in the adjacency dict topology[node])
            if stack_name == 'router' and 'depth' in self.topology.node[node]:
                depth = self.topology.node[node]['depth']
                if depth > self.topology_depth:
                    self.topology_depth = depth
            # get computation size per depth
            if stack_name == 'router':
                if 'cache_size' in stack_props:
                    cache_size[node] = stack_props['cache_size']
                if 'computation_size' in stack_props:
                    comp_size[node] = stack_props['computation_size']
                if 'service_size' in stack_props:
                    service_size[node] = stack_props['service_size']
            elif stack_name == 'source':  # A Cloud with infinite resources
                comp_size[node] = float('inf')
                service_size[node] = float('inf')
                contents = stack_props['contents']
                self.source_node[node] = contents
                for content in contents:
                    self.content_source[content] = node
        if any(c < 1 for c in cache_size.values()):
            logger.warning('Some content caches have size equal to 0. '
                           'Setting them to 1 and running the experiment anyway.')
            for node in cache_size:
                if cache_size[node] < 1:
                    cache_size[node] = 1

        policy_name = cache_policy['name']
        policy_args = {k: v for k, v in cache_policy.items() if k != 'name'}
        # The actual cache objects storing the content
        self.cache = {
            node: CACHE_POLICY[policy_name](cache_size[node], **policy_args)
            for node in cache_size
        }

        # Generate the actual services processing requests
        self.services = []
        self.n_services = n_services
        internal_link_delay = 0.001  # This is the delay from receiver to router

        service_time_min = 0.10  # used to be 0.001
        service_time_max = 0.10  # used to be 0.1
        delay_min = 2 * topology.graph['receiver_access_delay'] + service_time_max
        delay_max = delay_min + 2 * topology.graph['depth'] * topology.graph['link_delay'] + 0.005

        service_indx = 0
        random.seed(seed)
        for service in range(0, n_services):
            service_time = random.uniform(service_time_min, service_time_max)
            deadline = random.uniform(delay_min, delay_max) + 2 * internal_link_delay
            s = Service(service_time, deadline)
            self.services.append(s)

        ### Prepare input for the optimizer
        if False:
            aFile = open('inputToOptimizer.txt', 'w')
            aFile.write("# 1. ServiceIDs\n")
            first = True
            tostr = ""
            for service in range(0, n_services):
                if first:
                    tostr += str(service)
                    first = False
                else:
                    tostr += "," + str(service)
            aFile.write(s)

            aFile.write("# 2. Set of APs:\n")
            first = True
            tostr = ""
            for ap in topology.graph['receivers']:
                if first:
                    tostr = str(ap)
                    first = False
                else:
                    tostr += "," + str(ap)
            tostr += '\n'
            aFile.write(tostr)

            aFile.write("# 3. Set of nodes:\n")
            first = True
            tostr = ""
            for node in topology.nodes_iter():
                if node in topology.graph['receivers']:
                    continue
                if first:
                    tostr = str(node)
                    first = False
                else:
                    tostr = "," + str(node)
            tostr += '\n'
            aFile.write(tostr)

            aFile.write("# 4. NodeID, serviceID, numCores\n")
            if topology.graph['type'] == 'TREE':
                ap_node_to_services = {}
                ap_node_to_delay = {}
                for ap in topology.graph['receivers']:
                    node_to_delay = {}
                    node_to_services = {}
                    node_to_delay[ap] = 0.0
                    ap_node_to_services[ap] = node_to_services
                    ap_node_to_delay[ap] = node_to_delay
                    for node in topology.nodes_iter():
                        for egress, ingress in topology.edges_iter():
                            if ingress in node_to_delay and egress not in node_to_delay:
                                node_to_delay[egress] = (node_to_delay[ingress] +
                                                         topology.edge[ingress][egress]['delay'])
                                node_to_services[egress] = []
                                service_indx = 0
                                for s in self.services:
                                    if s.deadline >= s.service_time + 2 * node_to_delay[egress]:
                                        node_to_services[egress].append(service_indx)
                                    service_indx += 1
                aFile.write("# 4. Ap,Node,service1,service2, ....]\n")
                for ap in topology.graph['receivers']:
                    node_to_services = ap_node_to_services[ap]
                    node_to_delay = ap_node_to_delay[ap]
                    for node, services in node_to_services.items():
                        s = str(ap) + "," + str(
                            node)  #+ "," + str(node_to_delay[node])
                        for serv in services:
                            s += "," + str(serv)
                        s += '\n'
                        aFile.write(s)
                aFile.write(
                    "# 5. AP, rate_service1, rate_service2, ... rate_serviceN\n"
                )
                rate = 1.0 / (len(topology.graph['receivers']) * len(self.services))
                for ap in topology.graph['receivers']:
                    s = str(ap)
                    for serv in self.services:
                        s += "," + str(rate)
                    s += '\n'
                    aFile.write(s)

            aFile.close()
        ComputationSpot.services = self.services
        self.compSpot = {
            node: ComputationSpot(self, comp_size[node], service_size[node],
                                  self.services, node, sched_policy, None)
            for node in comp_size
        }
        #print ("Generated Computation Spot Objects")
        sys.stdout.flush()
        # This is for a local un-coordinated cache (currently used only by
        # Hashrouting with edge cache)
        self.local_cache = {}

        # Keep track of nodes and links removed to simulate failures
        self.removed_nodes = {}
        # This keeps track of neighbors of a removed node at the time of removal.
        # It is needed to ensure that when the node is restored only links that
        # were removed as part of the node removal are restored and to prevent
        # restoring nodes that were removed manually before removing the node.
        self.disconnected_neighbors = {}
        self.removed_links = {}
        self.removed_sources = {}
        self.removed_caches = {}
        self.removed_local_caches = {}
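
For concreteness, a worked example of the deadline window computed above, with assumed graph attributes receiver_access_delay = 0.005, depth = 2 and link_delay = 0.015 (none of these values appear in the snippet):

# delay_min = 2 * 0.005 + 0.10                = 0.110
# delay_max = 0.110 + 2 * 2 * 0.015 + 0.005   = 0.175
# deadline  ~ Uniform(0.110, 0.175) + 2 * 0.001 seconds
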
Example #9
    def __init__(self,
                 topology,
                 cache_policy,
                 n_contents=0,
                 shortest_path=None,
                 rl_algorithm=None):
        """Constructor

        Parameters
        ----------
        topology : fnss.Topology
            The topology object
        cache_policy : dict or Tree
            cache policy descriptor. It has a name attribute identifying the
            cache policy and keyword arguments specific to the policy
        n_contents : int, optional
            Total number of content objects
        shortest_path : dict of dict, optional
            The all-pair shortest paths of the network
        rl_algorithm : dict of dict, optional
            Name of the RL algorithm used along with initialization parameters
        """
        # Filter inputs
        if not isinstance(topology, fnss.Topology):
            raise ValueError('The topology argument must be an instance of '
                             'fnss.Topology or any of its subclasses.')

        # Shortest paths of the network
        self.shortest_path = dict(shortest_path) if shortest_path is not None \
                             else symmetrify_paths(dict(nx.all_pairs_dijkstra_path(topology)))

        # Network topology
        self.topology = topology

        # Number of contents
        self.n_contents = n_contents
        self.POPULARITY = self.POPULARITY or {i: 0 for i in range(1, n_contents + 1)}

        # Dictionary mapping each content object to its source
        # dict of location of contents keyed by content ID
        self.content_source = {}
        # Dictionary mapping the reverse, i.e. nodes to set of contents stored
        self.source_node = {}

        # Dictionary of link types (internal/external)
        self.link_type = nx.get_edge_attributes(topology, 'type')
        self.link_delay = fnss.get_delays(topology)
        # Instead of this manual assignment, I could have converted the
        # topology to directed before extracting type and link delay but that
        # requires a deep copy of the topology that can take long time if
        # many content source mappings are included in the topology
        if not topology.is_directed():
            for (u, v), link_type in list(self.link_type.items()):
                self.link_type[(v, u)] = link_type
            for (u, v), delay in list(self.link_delay.items()):
                self.link_delay[(v, u)] = delay

        cache_size = {}
        for node in topology.nodes():
            stack_name, stack_props = fnss.get_stack(topology, node)
            if stack_name == 'router':
                if 'cache_size' in stack_props:
                    cache_size[node] = stack_props['cache_size']
            elif stack_name == 'source':
                contents = stack_props['contents']
                self.source_node[node] = contents
                for content in contents:
                    self.content_source[content] = node
        if any(c < 1 for c in cache_size.values()):
            logger.warning('Some content caches have size equal to 0. '
                           'Setting them to 1 and running the experiment anyway.')
            for node in cache_size:
                if cache_size[node] < 1:
                    cache_size[node] = 1

        policy_name = cache_policy['name']
        policy_args = {k: v for k, v in cache_policy.items() if k != 'name'}
        # The actual cache objects storing the content
        self.cache = {
            node: CACHE_POLICY[policy_name](cache_size[node], **policy_args)
            for node in cache_size
        }

        # The intelligent agent for each router using RL
        self.ai_models = {}
        if rl_algorithm:
            if self.CACHE:
                for _, model in NetworkModel.CACHE.items():
                    model.network_model = self
                self.ai_models = NetworkModel.CACHE
            else:
                states = (2**n_contents) * n_contents
                actions = 2**n_contents
                self.CACHE = self.ai_models = {
                    node: RL_ALGO[rl_algorithm['name']](states,
                                                        actions,
                                                        node=node,
                                                        network_model=self)
                    for node in cache_size
                }

        # When an RL agent is used, this dict stores observations so the model
        # can be trained once the current state (the "next_state") is known
        self.observations = {}

        # This is for a local un-coordinated cache (currently used only by
        # Hashrouting with edge cache)
        self.local_cache = {}

        # Keep track of nodes and links removed to simulate failures
        self.removed_nodes = {}
        # This keeps track of neighbors of a removed node at the time of removal.
        # It is needed to ensure that when the node is restored only links that
        # were removed as part of the node removal are restored and to prevent
        # restoring nodes that were removed manually before removing the node.
        self.disconnected_neighbors = {}
        self.removed_links = {}
        self.removed_sources = {}
        self.removed_caches = {}
        self.removed_local_caches = {}
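
To illustrate the state/action sizing used when rl_algorithm is given (each node's cache configuration is one of 2**n_contents subsets, paired with whichever content is requested):

n_contents = 3
states = (2 ** n_contents) * n_contents  # 8 cache configurations x 3 contents = 24 states
actions = 2 ** n_contents                # 8 possible cache configurations
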
Example #10
    def __init__(self, topology, cache_policy, betw=None, shortest_path=None):
        """Constructor

        Parameters
        ----------
        topology : fnss.Topology
            The topology object
        cache_policy : dict or Tree
            cache policy descriptor. It has a name attribute identifying the
            cache policy and keyword arguments specific to the policy
        betw : dict, optional
            Betweenness centrality of each node, keyed by node
        shortest_path : dict of dict, optional
            The all-pair shortest paths of the network
        """
        # Filter inputs
        if not isinstance(topology, fnss.Topology):
            raise ValueError('The topology argument must be an instance of '
                             'fnss.Topology or any of its subclasses.')

        # Shortest paths of the network
        self.shortest_path = shortest_path if shortest_path is not None \
                             else symmetrify_paths(nx.all_pairs_dijkstra_path(topology))

        # Network topology
        self.topology = topology

        # Dictionary mapping each content object to its source
        # dict of location of contents keyed by content ID
        self.content_source = {}
        # Dictionary mapping the reverse, i.e. nodes to set of contents stored
        self.source_node = {}

        # Dictionary of link types (internal/external)
        self.link_type = nx.get_edge_attributes(topology, 'type')
        self.link_delay = fnss.get_delays(topology)
        # Instead of this manual assignment, I could have converted the
        # topology to directed before extracting type and link delay but that
        # requires a deep copy of the topology that can take long time if
        # many content source mappings are included in the topology
        if not topology.is_directed():
            for (u, v), link_type in list(self.link_type.items()):
                self.link_type[(v, u)] = link_type
            for (u, v), delay in list(self.link_delay.items()):
                self.link_delay[(v, u)] = delay

        cache_size = {}
        for node in topology.nodes_iter():
            stack_name, stack_props = fnss.get_stack(topology, node)
            if stack_name == 'router':
                if 'cache_size' in stack_props:
                    cache_size[node] = stack_props['cache_size']
            elif stack_name == 'source':
                contents = stack_props['contents']
                self.source_node[node] = contents
                for content in contents:
                    self.content_source[content] = node
        if any(c < 1 for c in cache_size.values()):
            logger.warning('Some content caches have size equal to 0. '
                           'Setting them to 1 and running the experiment anyway.')
            for node in cache_size:
                if cache_size[node] < 1:
                    cache_size[node] = 1


        policy_name = cache_policy['name']
        policy_args = {k: v for k, v in cache_policy.items() if k != 'name'}


        # Compute the maximum and average betweenness; the average is used
        # below as the caching threshold
        max_betw = -1
        avg_betw = 0
        for node in betw:
            avg_betw += betw[node]
            if betw[node] > max_betw:
                max_betw = betw[node]
        avg_betw /= len(betw)

        # Rank nodes by betweenness, highest first; an alternative threshold
        # could be picked from this ranking (e.g. sorted_betw[2][1])
        sorted_betw = sorted(betw.items(), key=lambda value: value[1], reverse=True)
        thres_betw = avg_betw

        # The actual cache objects storing the content
        hybrid = (policy_name == 'HYBRID')
        if hybrid:
            self.cache = {}
            for node in cache_size:
                if betw[node] >= thres_betw:
                    self.cache[node] = CACHE_POLICY['LFU_1'](cache_size[node], **policy_args)
                else:
                    self.cache[node] = CACHE_POLICY['LRU'](cache_size[node], **policy_args)
        else:
            self.cache = {node: CACHE_POLICY[policy_name](cache_size[node],
                                                          central_router=(betw[node] > avg_betw),
                                                          betw=betw[node], avg_betw=avg_betw,
                                                          **policy_args)
                          for node in cache_size}

        # This is for a local un-coordinated cache (currently used only by
        # Hashrouting with edge cache)
        self.local_cache = {}

        # Keep track of nodes and links removed to simulate failures
        self.removed_nodes = {}
        # This keeps track of neighbors of a removed node at the time of removal.
        # It is needed to ensure that when the node is restored only links that
        # were removed as part of the node removal are restored and to prevent
        # restoring nodes that were removed manually before removing the node.
        self.disconnected_neighbors = {}
        self.removed_links = {}
        self.removed_sources = {}
        self.removed_caches = {}
        self.removed_local_caches = {}
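
The betw argument is plausibly a betweenness-centrality map; a hedged construction sketch (the NetworkModel class name and the 'HYBRID' policy registration are assumptions):

import networkx as nx

betw = nx.betweenness_centrality(topology)
model = NetworkModel(topology, {'name': 'HYBRID'}, betw=betw)

Note that the default betw=None would fail in the averaging loop above, so a map must always be supplied.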