Example #1
0
def uniform_cache_placement_with_uCache(topology, nCache_budget, uCache_budget,
                                        **kwargs):
    """Distribute the nCache and uCache budgets evenly over their candidates.

    Parameters
    ----------
    topology : Topology
        The topology object
    nCache_budget : int
        The cumulative nCache budget
    uCache_budget : int
        The cumulative uCache budget
    """
    # Evenly split the nCache budget over the router-side candidates.
    routers = topology.graph['icr_candidates']
    per_router = iround(nCache_budget / len(routers))
    for node in routers:
        topology.node[node]['stack'][1]['cache_size'] = per_router

    # Evenly split the uCache budget over the receiver-side candidates.
    receivers = topology.graph['uCache_candidates']
    per_receiver = iround(uCache_budget / len(receivers))
    for node in receivers:
        topology.node[node]['stack'][1]['cache_size'] = per_receiver
Example #2
0
def uniform_computation_placement(topology, computation_budget, service_budget, **kwargs):
    """Places computation and service budgets uniformly across cache nodes.

    Parameters
    ----------
    topology : Topology
        The topology object
    computation_budget : int
        The cumulative computation budget in terms of the number of VMs
    service_budget : int
        The cumulative service budget, split evenly across the candidates
    """
    icr_candidates = topology.graph['icr_candidates']
    # Evenly divide each budget among the ICR candidate nodes.
    computation_size = iround(computation_budget / len(icr_candidates))
    service_size = iround(service_budget / len(icr_candidates))
    for v in icr_candidates:
        topology.node[v]['stack'][1]['service_size'] = service_size
        topology.node[v]['stack'][1]['computation_size'] = computation_size
        # NOTE: cache_size is deliberately set to the per-node *service* size,
        # mirroring the original allocation scheme.
        topology.node[v]['stack'][1]['cache_size'] = service_size
Example #3
0
    def reserve_local_cache(self, ratio=0.1):
        """Reserve a fraction of cache as local.

        This method reserves a fixed fraction of the cache of each caching node
        to act as local uncoordinated cache. Methods `get_content` and
        `put_content` will only operate on the coordinated cache. The reserved
        local cache can be accessed with methods `get_content_local_cache` and
        `put_content_local_cache`.

        This function is currently used only by hybrid hash-routing strategies.

        Parameters
        ----------
        ratio : float
            The ratio of cache space to be reserved as local cache.

        Raises
        ------
        ValueError
            If *ratio* is outside the [0, 1] interval.
        """
        if ratio < 0 or ratio > 1:
            raise ValueError("ratio must be between 0 and 1")
        # Iterate over a snapshot of the items: entries may be removed from
        # self.model.cache during the loop.
        for v, c in list(self.model.cache.items()):
            maxlen = iround(c.maxlen * (1 - ratio))
            if maxlen > 0:
                # Replace the coordinated cache with a smaller instance of the
                # same cache policy class.
                self.model.cache[v] = type(c)(maxlen)
            else:
                # If the coordinated cache size is zero, then remove cache
                # from that location
                if v in self.model.cache:
                    self.model.cache.pop(v)
            local_maxlen = iround(c.maxlen * (ratio))
            if local_maxlen > 0:
                # Reserved slice becomes an independent local cache.
                self.model.local_cache[v] = type(c)(local_maxlen)
def clustered_hashrouting_cache_placement(topology,
                                          cache_budget,
                                          n_clusters,
                                          policy,
                                          distance='delay',
                                          **kwargs):
    """Deploy caching nodes for hashrouting in with clusters

    Parameters
    ----------
    topology : Topology
        The topology object
    cache_budget : int
        The cumulative cache budget
    n_clusters : int
        The number of clusters
    policy : str (node_const | cluster_const)
        The budget allocation policy: *node_const* gives every ICR candidate
        the same cache size; *cluster_const* gives every cluster the same
        aggregate budget, split evenly among its members
    distance : str
        The attribute used to quantify distance between pairs of nodes.
        Default is 'delay'

    Raises
    ------
    ValueError
        If *n_clusters* is not in [1, number of ICR candidates], or *policy*
        is not a supported value.
    """
    icr_candidates = topology.graph['icr_candidates']
    if n_clusters <= 0 or n_clusters > len(icr_candidates):
        raise ValueError("The number of cluster must be positive and <= the "
                         "number of ICR candidate nodes")
    elif n_clusters == 1:
        # Degenerate case: all candidates in one cluster.
        clusters = [set(icr_candidates)]
    elif n_clusters == len(icr_candidates):
        # Degenerate case: each candidate is its own cluster.
        clusters = [set([v]) for v in icr_candidates]
    else:
        clusters = compute_clusters(topology,
                                    n_clusters,
                                    distance=distance,
                                    nbunch=icr_candidates,
                                    n_iter=100)
    deploy_clusters(topology, clusters, assign_src_rcv=True)
    if policy == 'node_const':
        # Each node is assigned the same amount of caching space
        cache_size = iround(cache_budget / len(icr_candidates))
        if cache_size == 0:
            return
        for v in icr_candidates:
            topology.node[v]['stack'][1]['cache_size'] = cache_size
    elif policy == 'cluster_const':
        # Each cluster gets an equal share; split evenly within the cluster.
        cluster_cache_size = iround(cache_budget / n_clusters)
        for cluster in topology.graph['clusters']:
            cache_size = iround(cluster_cache_size / len(cluster))
            for v in cluster:
                # Clusters may include non-candidate nodes (e.g. sources or
                # receivers); those get no cache.
                if v not in icr_candidates:
                    continue
                topology.node[v]['stack'][1]['cache_size'] = cache_size
    else:
        raise ValueError('clustering policy %s not supported' % policy)
Example #5
0
def uniform_consolidated_cache_placement(topology,
                                         cache_budget,
                                         spread=0.5,
                                         metric_dict=None,
                                         target='top',
                                         **kwargs):
    """Concentrate the cache budget on a centrality-ranked subset of nodes.

    Unlike strategies that size every node's cache by its centrality, this
    one gives an equal-sized cache to a selected subset of candidate nodes.

    Parameters
    ----------
    topology : Topology
        The topology object
    cache_budget : int
        The cumulative cache budget
    spread : float [0, 1], optional
        Fraction of candidate nodes that receive a cache. 1 assigns a cache
        to every candidate; 0 assigns one only to the single highest/lowest
        ranked node
    metric_dict : dict, optional
        Node ranking metric; betweenness centrality is computed when omitted
    target : ("top" | "bottom"), optional
        Whether caches go to the best or worst ranked nodes
    """
    if spread < 0 or spread > 1:
        raise ValueError('spread factor must be between 0 and 1')
    if target not in ('top', 'bottom'):
        raise ValueError('target argument must be either "top" or "bottom"')
    if metric_dict is None and spread < 1:
        metric_dict = nx.betweenness_centrality(topology)

    candidates = topology.graph['icr_candidates']
    if spread == 1:
        selected = candidates
    else:
        ranked = sorted(candidates, key=lambda n: metric_dict[n])
        if target == 'top':
            ranked = ranked[::-1]
        # Keep at least one node so a tiny spread still deploys a cache.
        selected = ranked[:max(1, iround(spread * len(ranked)))]
    per_node = iround(cache_budget / len(selected))
    if per_node == 0:
        return
    for node in selected:
        topology.node[node]['stack'][1]['cache_size'] = per_node
Example #6
0
def uniform_consolidated_cache_placement(topology, cache_budget, spread=0.5,
                                         metric_dict=None, target='top',
                                         **kwargs):
    """Place equal-sized caches on a centrality-selected subset of nodes.

    Rather than spreading budget over all nodes proportionally to their
    centrality, this strategy deploys identical caches on the fraction of
    candidates that rank highest (or lowest) under the given metric.

    Parameters
    ----------
    topology : Topology
        The topology object
    cache_budget : int
        The cumulative cache budget
    spread : float [0, 1], optional
        How widely the budget is spread: 1 covers all candidates, 0 only
        the single best/worst ranked node
    metric_dict : dict, optional
        Ranking metric per node; defaults to betweenness centrality
    target : ("top" | "bottom"), optional
        End of the ranking from which nodes are selected
    """
    if spread < 0 or spread > 1:
        raise ValueError('spread factor must be between 0 and 1')
    if target not in ('top', 'bottom'):
        raise ValueError('target argument must be either "top" or "bottom"')
    if metric_dict is None and spread < 1:
        metric_dict = nx.betweenness_centrality(topology)

    icr_candidates = topology.graph['icr_candidates']
    if spread == 1:
        chosen = icr_candidates
    else:
        ordering = sorted(icr_candidates, key=lambda v: metric_dict[v])
        if target == 'top':
            ordering = list(reversed(ordering))
        # Guarantee a non-empty selection even for very small spread values.
        n_selected = max(1, iround(spread * len(ordering)))
        chosen = ordering[:n_selected]
    size = iround(cache_budget / len(chosen))
    if size == 0:
        return
    for node in chosen:
        topology.node[node]['stack'][1]['cache_size'] = size
Example #7
0
def central_computation_placement(topology, computation_budget, n_services,
                                  **kwargs):
    """Places computation budget proportionally to the betweenness centrality of the
    node.

    Parameters
    ----------
    topology : Topology
        The topology object
    computation_budget : int
        The cumulative computation budget in terms of the number of VMs
    n_services : int
        Number of services (accepted for interface compatibility; not used
        by this placement)
    """
    betw = nx.betweenness_centrality(topology)
    # The root is the single ICR candidate at tree depth 0 — assumes a tree
    # topology annotated with a 'depth' node attribute.
    root = [
        v for v in topology.graph['icr_candidates']
        if topology.node[v]['depth'] == 0
    ][0]
    total_betw = sum(betw.values())
    icr_candidates = topology.graph['icr_candidates']
    total = 0
    for v in icr_candidates:
        topology.node[v]['stack'][1]['computation_size'] = iround(
            computation_budget * betw[v] / total_betw)
        total += topology.node[v]['stack'][1]['computation_size']

    # Give the rounding remainder to the root so the full budget is assigned.
    topology.node[root]['stack'][1]['computation_size'] += int(
        computation_budget - total)
def random_cache_placement(topology,
                           cache_budget,
                           n_cache_nodes,
                           seed=None,
                           **kwargs):
    """Deploy caching nodes randomly

    Parameters
    ----------
    topology : Topology
        The topology object
    cache_budget : int
        The cumulative cache budget
    n_cache_nodes : int
        The number of caching nodes to deploy
    seed : hashable, optional
        Seed for the pseudo-random selection of caching nodes

    Raises
    ------
    ValueError
        If there are fewer ICR candidates than requested caching nodes.
    """
    n_cache_nodes = int(n_cache_nodes)
    icr_candidates = topology.graph['icr_candidates']
    if len(icr_candidates) < n_cache_nodes:
        raise ValueError(
            "The number of ICR candidates is lower than the target number of caches"
        )
    elif len(icr_candidates) == n_cache_nodes:
        caches = icr_candidates
    else:
        random.seed(seed)
        caches = random.sample(icr_candidates, n_cache_nodes)
    cache_size = iround(cache_budget / n_cache_nodes)
    # If the budget is too small for each node to get even one slot,
    # deploy no caches at all.
    if cache_size == 0:
        return
    for v in caches:
        topology.node[v]['stack'][1]['cache_size'] = cache_size
Example #9
0
def random_cache_placement(topology, cache_budget, n_cache_nodes,
                           seed=None, **kwargs):
    """Deploy caching nodes at randomly chosen candidate locations.

    Parameters
    ----------
    topology : Topology
        The topology object
    cache_budget : int
        The cumulative cache budget
    n_cache_nodes : int
        The number of caching nodes to deploy
    seed : hashable, optional
        Seed for the pseudo-random choice of caching nodes
    """
    n_cache_nodes = int(n_cache_nodes)
    candidates = topology.graph['icr_candidates']
    n_candidates = len(candidates)
    if n_candidates < n_cache_nodes:
        raise ValueError("The number of ICR candidates is lower than the target number of caches")
    if n_candidates == n_cache_nodes:
        # Every candidate must host a cache; no randomness needed.
        chosen = candidates
    else:
        random.seed(seed)
        chosen = random.sample(candidates, n_cache_nodes)
    size = iround(cache_budget / n_cache_nodes)
    if size == 0:
        # Budget too small to give each node even one slot.
        return
    for node in chosen:
        topology.node[node]['stack'][1]['cache_size'] = size
Example #10
0
def clustered_hashrouting_cache_placement(topology, cache_budget, n_clusters,
                            policy, distance='delay', **kwargs):
    """Deploy hash-routing caches over a clustered topology.

    Parameters
    ----------
    topology : Topology
        The topology object
    cache_budget : int
        The cumulative cache budget
    n_clusters : int
        The number of clusters
    policy : str (node_const | cluster_const)
        Budget allocation policy across nodes or clusters
    distance : str
        The attribute used to quantify distance between pairs of nodes.
        Default is 'delay'
    """
    candidates = topology.graph['icr_candidates']
    if n_clusters <= 0 or n_clusters > len(candidates):
        raise ValueError("The number of cluster must be positive and <= the "
                         "number of ICR candidate nodes")
    if n_clusters == 1:
        # Degenerate case: one cluster containing all candidates.
        clusters = [set(candidates)]
    elif n_clusters == len(candidates):
        # Degenerate case: each candidate forms a singleton cluster.
        clusters = [{v} for v in candidates]
    else:
        clusters = compute_clusters(topology, n_clusters, distance=distance,
                                    nbunch=candidates, n_iter=100)
    deploy_clusters(topology, clusters, assign_src_rcv=True)
    if policy == 'node_const':
        # Identical cache size on every candidate node.
        size = iround(cache_budget / len(candidates))
        if size == 0:
            return
        for node in candidates:
            topology.node[node]['stack'][1]['cache_size'] = size
    elif policy == 'cluster_const':
        # Equal aggregate budget per cluster, split evenly inside it.
        per_cluster = iround(cache_budget / n_clusters)
        for cluster in topology.graph['clusters']:
            size = iround(per_cluster / len(cluster))
            for node in cluster:
                # Skip non-candidate members (e.g. sources/receivers).
                if node in candidates:
                    topology.node[node]['stack'][1]['cache_size'] = size
    else:
        raise ValueError('clustering policy %s not supported' % policy)
def uniform_cache_updated(topology, cache_budget, receiver_option, **kwargs):
    """Places cache budget uniformly across cache nodes, optionally
    counting receivers among the candidates.

    Parameters
    ----------
    topology : Topology
        The topology object
    cache_budget : int
        The cumulative cache budget
    receiver_option : bool
        If true, both receivers and routers become ICR candidates;
        otherwise only routers do
    """
    # Rebuild the candidate set on the topology before placing caches.
    if receiver_option:
        candidates = set(topology.receivers()) | set(topology.routers())
    else:
        candidates = set(topology.routers())
    topology.graph['icr_candidates'] = candidates
    cache_size = iround(cache_budget / len(candidates))
    for v in candidates:
        topology.node[v]['stack'][1]['cache_size'] = cache_size
Example #12
0
def optimal_hashrouting_cache_placement(topology,
                                        cache_budget,
                                        n_cache_nodes,
                                        hit_ratio,
                                        weight='delay',
                                        **kwargs):
    """Deploy caching nodes for hashrouting in optimized location

    Parameters
    ----------
    topology : Topology
        The topology object
    cache_budget : int
        The cumulative cache budget
    n_cache_nodes : int
        The number of caching nodes to deploy
    hit_ratio : float
        The expected global cache hit ratio
    weight : str, optional
        The weight attribute. Default is 'delay'

    Raises
    ------
    ValueError
        If there are fewer ICR candidates than requested cache nodes, or
        the budget is too small to give each node a non-zero cache.
    """
    n_cache_nodes = int(n_cache_nodes)
    icr_candidates = topology.graph['icr_candidates']
    if len(icr_candidates) < n_cache_nodes:
        raise ValueError("The number of ICR candidates (%d) is lower than "
                         "the target number of caches (%d)" %
                         (len(icr_candidates), n_cache_nodes))
    elif len(icr_candidates) == n_cache_nodes:
        caches = list(icr_candidates)
    else:
        # Need to optimally allocate caching nodes
        distances = dict(
            nx.all_pairs_dijkstra_path_length(topology, weight=weight))
        # Cost of candidate v = total distance from all receivers to v, plus
        # distance from v to all sources weighted by the expected miss path
        # usage (hit_ratio scales the source-bound leg).
        d = {}
        for v in icr_candidates:
            d[v] = 0
            for r in topology.receivers():
                d[v] += distances[r][v]
            for s in topology.sources():
                d[v] += distances[v][s] * hit_ratio

        # Sort caches in increasing order of distances and assign cache sizes
        caches = sorted(icr_candidates, key=lambda k: d[k])
    cache_size = iround(cache_budget / n_cache_nodes)
    if cache_size == 0:
        raise ValueError(
            "Cache budget is %d but it's too small to deploy it on %d nodes. "
            "Each node will have a zero-sized cache. "
            "Set a larger cache budget and try again" %
            (cache_budget, n_cache_nodes))
    for v in caches[:n_cache_nodes]:
        topology.node[v]['stack'][1]['cache_size'] = cache_size
Example #13
0
def uniform_computation_cache_repo_placement(topology, computation_budget, service_budget, storage_budget, **kwargs):
    """Places computation, service and storage budgets uniformly across cache nodes.

    Parameters
    ----------
    topology : Topology
        The topology object
    computation_budget : int
        The cumulative computation budget in terms of the number of VMs
    service_budget : int
        The cumulative service budget, split evenly across the candidates
    storage_budget : int
        Storage capacity assigned, in full, to every candidate node
    """
    icr_candidates = topology.graph['icr_candidates']
    # Evenly divide computation and service budgets among the candidates.
    computation_size = iround(computation_budget / (len(icr_candidates)))
    service_size = iround(service_budget / (len(icr_candidates)))
    for v in icr_candidates:
        topology.node[v]['stack'][1]['service_size'] = service_size
        topology.node[v]['stack'][1]['computation_size'] = computation_size
        # Every node receives the whole storage budget; it is not divided.
        topology.node[v]['stack'][1]['storageSize'] = storage_budget
        # cache_size deliberately mirrors the per-node service size.
        topology.node[v]['stack'][1]['cache_size'] = service_size
Example #14
0
def uniform_cache_placement(topology, cache_budget, **kwargs):
    """Split the cache budget evenly among all ICR candidate nodes.

    Parameters
    ----------
    topology : Topology
        The topology object
    cache_budget : int
        The cumulative cache budget
    """
    candidates = topology.graph['icr_candidates']
    per_node = iround(cache_budget / len(candidates))
    for node in candidates:
        topology.node[node]['stack'][1]['cache_size'] = per_node
Example #15
0
def uniform_cache_placement(topology, cache_budget, **kwargs):
    """Give every ICR candidate node an identical share of the cache budget.

    Parameters
    ----------
    topology : Topology
        The topology object
    cache_budget : int
        The cumulative cache budget
    """
    nodes = topology.graph['icr_candidates']
    share = iround(cache_budget / len(nodes))
    for n in nodes:
        topology.node[n]['stack'][1]['cache_size'] = share
Example #16
0
def uniform_computation_placement(topology, computation_budget, **kwargs):
    """Split the computation budget evenly among all ICR candidate nodes.

    Parameters
    ----------
    topology : Topology
        The topology object
    computation_budget : int
        The cumulative computation budget in terms of the number of VMs
    """
    candidates = topology.graph['icr_candidates']
    per_node = iround(computation_budget / len(candidates))
    for node in candidates:
        topology.node[node]['stack'][1]['computation_size'] = per_node
Example #17
0
def degree_centrality_cache_placement(topology, cache_budget, **kwargs):
    """Places cache budget proportionally to the degree of the node.

    Parameters
    ----------
    topology : Topology
        The topology object
    cache_budget : int
        The cumulative cache budget
    """
    # nx.degree returns a DegreeView in NetworkX >= 2, which has no values()
    # method; materialize it as a plain dict first (matches the dict-based
    # variant of this function elsewhere in this file).
    deg = dict(nx.degree(topology))
    total_deg = sum(deg.values())
    icr_candidates = topology.graph['icr_candidates']
    for v in icr_candidates:
        topology.node[v]['stack'][1]['cache_size'] = iround(cache_budget * deg[v] / total_deg)
def core_cache_placement(topology, cache_budget, **kwargs):
    """Places cache budget uniformly across BBU cache.

    Only ICR candidates at depth 1 receive a cache; each one gets one third
    of the total budget.

    Parameters
    ----------
    topology : Topology
        The topology object
    cache_budget : int
        The cumulative cache budget
    """
    icr_candidates = topology.graph['icr_candidates']
    # NOTE(review): the hard-coded divisor 3 appears to assume exactly three
    # depth-1 (BBU) nodes in the topology — confirm for other topologies.
    cache_size = iround(cache_budget / 3)
    for v in icr_candidates:
        if topology.node[v]['depth'] == 1: 
            topology.node[v]['stack'][1]['cache_size'] = cache_size    
def degree_centrality_cache_placement(topology, cache_budget, **kwargs):
    """Places cache budget proportionally to the degree of the node.

    Parameters
    ----------
    topology : Topology
        The topology object
    cache_budget : int
        The cumulative cache budget
    """
    # Wrap in dict(): modern NetworkX returns a DegreeView from nx.degree,
    # which does not expose values(); the dict form works on all versions.
    deg = dict(nx.degree(topology))
    total_deg = sum(deg.values())
    icr_candidates = topology.graph['icr_candidates']
    for v in icr_candidates:
        topology.node[v]['stack'][1]['cache_size'] = iround(cache_budget * deg[v] / total_deg)
Example #20
0
def betweenness_centrality_cache_placement(topology, cache_budget, **kwargs):
    """Assign cache space in proportion to each node's betweenness centrality.

    Parameters
    ----------
    topology : Topology
        The topology object
    cache_budget : int
        The cumulative cache budget
    """
    centrality = nx.betweenness_centrality(topology)
    # Normalization constant: total centrality over ALL topology nodes.
    norm = sum(centrality.values())
    for node in topology.graph['icr_candidates']:
        share = cache_budget * centrality[node] / norm
        topology.node[node]['stack'][1]['cache_size'] = iround(share)
def betweenness_centrality_cache_placement(topology, cache_budget, **kwargs):
    """Size each candidate's cache by its share of total betweenness centrality.

    Parameters
    ----------
    topology : Topology
        The topology object
    cache_budget : int
        The cumulative cache budget
    """
    betweenness = nx.betweenness_centrality(topology)
    total = sum(betweenness.values())
    candidates = topology.graph['icr_candidates']
    for node in candidates:
        size = iround(cache_budget * betweenness[node] / total)
        topology.node[node]['stack'][1]['cache_size'] = size
Example #22
0
def degree_centrality_cache_placement(topology, cache_budget, **kwargs):
    """Places cache budget proportionally to the degree of the node.

    Parameters
    ----------
    topology : Topology
        The topology object
    cache_budget : int
        The cumulative cache budget
    """
    degrees = dict(nx.degree(topology))
    candidates = set(topology.graph["icr_candidates"])
    # Normalize over candidate nodes only, so the full budget is spent on
    # nodes that can actually host a cache.
    norm = sum(d for n, d in degrees.items() if n in candidates)
    for node in candidates:
        topology.node[node]["stack"][1]["cache_size"] = iround(
            cache_budget * degrees[node] / norm)
Example #23
0
def optimal_hashrouting_cache_placement(topology, cache_budget, n_cache_nodes,
                                        hit_ratio, weight='delay', **kwargs):
    """Deploy caching nodes for hashrouting in optimized location

    Parameters
    ----------
    topology : Topology
        The topology object
    cache_budget : int
        The cumulative cache budget
    n_cache_nodes : int
        The number of caching nodes to deploy
    hit_ratio : float
        The expected global cache hit ratio
    weight : str, optional
        The weight attribute. Default is 'delay'

    Raises
    ------
    ValueError
        If there are fewer ICR candidates than requested cache nodes, or
        the budget is too small for a non-zero per-node cache.
    """
    n_cache_nodes = int(n_cache_nodes)
    icr_candidates = topology.graph['icr_candidates']
    if len(icr_candidates) < n_cache_nodes:
        raise ValueError("The number of ICR candidates (%d) is lower than "
                         "the target number of caches (%d)"
                         % (len(icr_candidates), n_cache_nodes))
    elif len(icr_candidates) == n_cache_nodes:
        caches = list(icr_candidates)
    else:
        # Need to optimally allocate caching nodes
        distances = dict(nx.all_pairs_dijkstra_path_length(topology, weight=weight))
        # Candidate cost = distance from every receiver to the candidate plus
        # the candidate-to-source distances scaled by the expected hit ratio.
        d = {}
        for v in icr_candidates:
            d[v] = 0
            for r in topology.receivers():
                d[v] += distances[r][v]
            for s in topology.sources():
                d[v] += distances[v][s] * hit_ratio

        # Sort caches in increasing order of distances and assign cache sizes
        caches = sorted(icr_candidates, key=lambda k: d[k])
    cache_size = iround(cache_budget / n_cache_nodes)
    if cache_size == 0:
        raise ValueError("Cache budget is %d but it's too small to deploy it on %d nodes. "
                         "Each node will have a zero-sized cache. "
                         "Set a larger cache budget and try again"
                         % (cache_budget, n_cache_nodes))
    for v in caches[:n_cache_nodes]:
        topology.node[v]['stack'][1]['cache_size'] = cache_size
Example #24
0
def betweenness_centrality_cache_placement(topology, cache_budget, **kwargs):
    """Places cache budget proportionally to the betweenness centrality of the
    node.

    Parameters
    ----------
    topology : Topology
        The topology object
    cache_budget : int
        The cumulative cache budget
    """
    centrality = dict(nx.betweenness_centrality(topology))
    candidates = set(topology.graph["icr_candidates"])
    # Normalize over candidate nodes only so the whole budget lands on
    # nodes that can host a cache.
    norm = sum(c for n, c in centrality.items() if n in candidates)
    for node in candidates:
        topology.node[node]["stack"][1]["cache_size"] = iround(
            cache_budget * centrality[node] / norm)
def uniform_sit_cache_placement(topology, cache_budget, n_contents, **kwargs):
    """Places cache budget uniformly across cache nodes and each receiver node
    gets full cache budget (i.e., can cache all the contents).

    Parameters
    ----------
    topology : Topology
        The topology object
    cache_budget : int
        The cumulative cache budget
    n_contents : int
        Total number of contents; every receiver gets a cache of this size
    """
    icr_candidates = topology.graph['icr_candidates']
    cache_size = iround(cache_budget / len(icr_candidates))
    for v in icr_candidates:
        topology.node[v]['stack'][1]['cache_size'] = cache_size

    # BUG FIX: receivers is a method — the original iterated over the bound
    # method object ("topology.receivers") instead of calling it, matching
    # how every other function in this file invokes topology.receivers().
    for v in topology.receivers():
        topology.node[v]['stack'][1]['cache_size'] = n_contents
Example #26
0
def clustered_hashrouting_cache_placement(topology,
                                          cache_budget,
                                          n_clusters,
                                          policy,
                                          distance='delay',
                                          **kwargs):
    """Deploy caching nodes for hashrouting in with clusters

    Parameters
    ----------
    topology : Topology
        The topology object
    cache_budget : int
        The cumulative cache budget
    n_clusters : int
        The number of clusters
    policy : str (node_const | cluster_const)
        The budget allocation policy: *node_const* gives every ICR candidate
        the same cache size; *cluster_const* gives every cluster the same
        aggregate budget, split evenly among its members
    distance : str
        The attribute used to quantify distance between pairs of nodes.
        Default is 'delay'

    References
    ----------
    .. [1] L. Saino, I. Psaras and G. Pavlou, Framework and Algorithms for
           Operator-managed Content Caching, in IEEE Transactions on
           Network and Service Management (TNSM), Volume 17, Issue 1, March 2020
           https://doi.org/10.1109/TNSM.2019.2956525
    .. [2] L. Saino, On the Design of Efficient Caching Systems, Ph.D. thesis
           University College London, Dec. 2015. Available:
           http://discovery.ucl.ac.uk/1473436/
    """
    icr_candidates = topology.graph['icr_candidates']
    if n_clusters <= 0 or n_clusters > len(icr_candidates):
        raise ValueError("The number of cluster must be positive and <= the "
                         "number of ICR candidate nodes")
    elif n_clusters == 1:
        # Degenerate case: all candidates in one cluster.
        clusters = [set(icr_candidates)]
    elif n_clusters == len(icr_candidates):
        # Degenerate case: each candidate is its own cluster.
        clusters = [set([v]) for v in icr_candidates]
    else:
        clusters = compute_clusters(topology,
                                    n_clusters,
                                    distance=distance,
                                    nbunch=icr_candidates,
                                    n_iter=100)
    deploy_clusters(topology, clusters, assign_src_rcv=True)
    if policy == 'node_const':
        # Each node is assigned the same amount of caching space
        cache_size = iround(cache_budget / len(icr_candidates))
        if cache_size == 0:
            return
        for v in icr_candidates:
            topology.node[v]['stack'][1]['cache_size'] = cache_size
    elif policy == 'cluster_const':
        # Equal aggregate budget per cluster, split evenly inside each.
        cluster_cache_size = iround(cache_budget / n_clusters)
        for cluster in topology.graph['clusters']:
            cache_size = iround(cluster_cache_size / len(cluster))
            for v in cluster:
                # Clusters may contain non-candidate nodes; skip them.
                if v not in icr_candidates:
                    continue
                topology.node[v]['stack'][1]['cache_size'] = cache_size
    else:
        raise ValueError('clustering policy %s not supported' % policy)
Example #27
0
def optimal_hashrouting_cache_placement(topology,
                                        cache_budget,
                                        n_cache_nodes,
                                        hit_ratio,
                                        weight='delay',
                                        **kwargs):
    """Deploy caching nodes for hashrouting in optimized location

    Parameters
    ----------
    topology : Topology
        The topology object
    cache_budget : int
        The cumulative cache budget
    n_cache_nodes : int
        The number of caching nodes to deploy
    hit_ratio : float
        The expected global cache hit ratio
    weight : str, optional
        The weight attribute. Default is 'delay'

    References
    ----------
    .. [1] L. Saino, I. Psaras and G. Pavlou, Framework and Algorithms for
           Operator-managed Content Caching, in IEEE Transactions on
           Network and Service Management (TNSM), Volume 17, Issue 1, March 2020
           https://doi.org/10.1109/TNSM.2019.2956525
    .. [2] L. Saino, On the Design of Efficient Caching Systems, Ph.D. thesis
           University College London, Dec. 2015. Available:
           http://discovery.ucl.ac.uk/1473436/
    """
    n_cache_nodes = int(n_cache_nodes)
    icr_candidates = topology.graph['icr_candidates']
    if len(icr_candidates) < n_cache_nodes:
        raise ValueError("The number of ICR candidates (%d) is lower than "
                         "the target number of caches (%d)" %
                         (len(icr_candidates), n_cache_nodes))
    elif len(icr_candidates) == n_cache_nodes:
        caches = list(icr_candidates)
    else:
        # Need to optimally allocate caching nodes
        distances = dict(
            nx.all_pairs_dijkstra_path_length(topology, weight=weight))
        # Candidate cost = receiver-to-candidate distances plus candidate-to-
        # source distances scaled by the expected hit ratio.
        d = {}
        for v in icr_candidates:
            d[v] = 0
            for r in topology.receivers():
                d[v] += distances[r][v]
            for s in topology.sources():
                d[v] += distances[v][s] * hit_ratio

        # Sort caches in increasing order of distances and assign cache sizes
        caches = sorted(icr_candidates, key=lambda k: d[k])
    cache_size = iround(cache_budget / n_cache_nodes)
    if cache_size == 0:
        raise ValueError(
            "Cache budget is %d but it's too small to deploy it on %d nodes. "
            "Each node will have a zero-sized cache. "
            "Set a larger cache budget and try again" %
            (cache_budget, n_cache_nodes))
    for v in caches[:n_cache_nodes]:
        topology.node[v]['stack'][1]['cache_size'] = cache_size
def repo_cache_storage_placement(topology,
                                 cache_budget,
                                 storage_budget,
                                 spread=1,
                                 metric_dict=None,
                                 target='top',
                                 **kwargs):
    """Place RepoStorage storage space and cache space on a set of nodes.

    Candidate nodes are ranked by a centrality metric and the top (or
    bottom) ``spread`` fraction of them is selected.  Every selected node
    receives the full ``storage_budget`` as its storage size, and the
    ``cache_budget`` is split evenly across the same nodes.

    TODO: Place and associate storage according to requirements, in the
        nodes of the (more/different) simulation scenarios.  Add/override
        storageCapability and processingCapability in the nodes'
        properties.  Also, actually associate RepoStorage objects to each
        node, NOT ONLY SIZES!

    Parameters
    ----------
    topology : Topology
        The topology object
    cache_budget : int
        The cumulative cache budget, split evenly among the selected nodes
    storage_budget : double
        The storage space assigned to EACH selected node (note: unlike the
        cache budget, this value is not divided among nodes)
    spread : float [0, 1], optional
        The spread factor.  The greater it is the more nodes are selected.
        If it is 1, all candidate nodes are selected; if it is 0, only the
        node with the highest/lowest centrality is selected.
    metric_dict : dict, optional
        The centrality metric according to which nodes are ranked.  If not
        specified, betweenness centrality is used.
    target : ("top" | "bottom"), optional
        The subsection of the ranked nodes on which to deploy storage and
        caches.

    Raises
    ------
    ValueError
        If *spread* is outside [0, 1] or *target* is not "top"/"bottom".
    """
    if spread < 0 or spread > 1:
        raise ValueError('spread factor must be between 0 and 1')
    if target not in ('top', 'bottom'):
        raise ValueError('target argument must be either "top" or "bottom"')
    if metric_dict is None and spread < 1:
        metric_dict = nx.betweenness_centrality(topology)

    # NOTE(review): nodes should eventually expose storageCapability and
    # processingCapability properties, to be read here and by the
    # RepoStorage systems.
    icr_candidates = topology.graph['icr_candidates']
    if spread == 1:
        target_nodes = icr_candidates
    else:
        nodes = sorted(icr_candidates, key=lambda k: metric_dict[k])
        if target == 'top':
            nodes = list(reversed(nodes))
        # cutoff node must be at least one otherwise, if spread is too low,
        # no nodes would be selected
        cutoff = max(1, iround(spread * len(nodes)))
        target_nodes = nodes[:cutoff]

    # Storage association: each selected node records the full budget as
    # its storage size.  TODO: associate a copy of an actual RepoStorage
    # object (sized accordingly) to each node instead of only the size.
    storage_size = storage_budget
    if storage_size == 0:
        return
    for v in target_nodes:
        topology.node[v]['stack'][1]['storageSize'] = storage_size

    # Cache placement on the same nodes.  The original code re-ran the
    # argument validation and node-selection logic here verbatim; it has
    # been deduplicated since it always produced the same target_nodes.
    cache_size = iround(cache_budget / len(target_nodes))
    if cache_size == 0:
        return
    for v in target_nodes:
        topology.node[v]['stack'][1]['cache_size'] = cache_size
def optimal_median_cache_placement(topology,
                                   cache_budget,
                                   n_cache_nodes,
                                   hit_ratio,
                                   weight='delay',
                                   **kwargs):
    """Deploy caching nodes in locations that minimize overall latency assuming
    a partitioned strategy (a la Google Global Cache). According to this, in
    the network, a set of caching nodes are deployed and each receiver is
    mapped to one and only one caching node. Requests from this receiver are
    always sent to the designated caching node. In case of cache miss requests
    are forwarded to the original source.

    This placement problem can be mapped to the p-median location-allocation
    problem. This function solves this problem using the vertex substitution
    heuristic, which practically works like the k-medoid PAM algorithms, which
    is also similar to the k-means clustering algorithm. The result is not
    guaranteed to be globally optimal, only locally optimal.

    Notes
    -----
    This placement assumes that all receivers have degree = 1 and are connected
    to an ICR candidate nodes. Also, it assumes that contents are uniformly
    assigned to sources.

    Parameters
    ----------
    topology : Topology
        The topology object
    cache_budget : int
        The cumulative cache budget
    n_cache_nodes : int
        The number of caching nodes to deploy
    hit_ratio : float
        The expected cache hit ratio of a single cache
    weight : str
        The weight attribute

    Raises
    ------
    ValueError
        If there are fewer ICR candidates than requested caches, or if the
        budget split yields zero-sized caches.
    """
    n_cache_nodes = int(n_cache_nodes)
    icr_candidates = topology.graph['icr_candidates']
    if len(icr_candidates) < n_cache_nodes:
        raise ValueError("The number of ICR candidates (%d) is lower than "
                         "the target number of caches (%d)" %
                         (len(icr_candidates), n_cache_nodes))
    elif len(icr_candidates) == n_cache_nodes:
        caches = list(icr_candidates)
        # Each receiver is mapped to its single attachment point (receivers
        # are assumed to have degree 1).  topology.adj replaces the
        # topology.edge attribute removed in NetworkX 2.0.
        cache_assignment = {
            v: list(topology.adj[v].keys())[0]
            for v in topology.receivers()
        }
    else:
        # Need to optimally allocate caching nodes.
        # all_pairs_dijkstra_path_length returns a generator in
        # NetworkX >= 2.0; materialize it so it can be indexed repeatedly.
        distances = dict(
            nx.all_pairs_dijkstra_path_length(topology, weight=weight))
        # NOTE(review): earlier revisions also built a symmetric cost matrix
        # d[v][u] = distances[v][u] + hit_ratio * (mean distance of u to the
        # sources) but then passed the raw `distances` to compute_p_median,
        # leaving d unused.  The dead O(n^2) computation has been removed;
        # if d was the intended compute_p_median input, restore and pass it.
        allocation, caches, _ = compute_p_median(distances, n_cache_nodes)
        cache_assignment = {
            v: allocation[list(topology.adj[v].keys())[0]]
            for v in topology.receivers()
        }

    cache_size = iround(cache_budget / n_cache_nodes)
    if cache_size == 0:
        raise ValueError(
            "Cache budget is %d but it's too small to deploy it on %d nodes. "
            "Each node will have a zero-sized cache. "
            "Set a larger cache budget and try again" %
            (cache_budget, n_cache_nodes))
    for v in caches:
        topology.node[v]['stack'][1]['cache_size'] = cache_size
    topology.graph['cache_assignment'] = cache_assignment
Example #30
0
def optimal_median_cache_placement(topology, cache_budget, n_cache_nodes,
                                   hit_ratio, weight='delay', **kwargs):
    """Deploy caching nodes in locations that minimize overall latency assuming
    a partitioned strategy (a la Google Global Cache). According to this, in
    the network, a set of caching nodes are deployed and each receiver is
    mapped to one and only one caching node. Requests from this receiver are
    always sent to the designated caching node. In case of cache miss requests
    are forwarded to the original source.

    This placement problem can be mapped to the p-median location-allocation
    problem. This function solves this problem using the vertex substitution
    heuristic, which practically works like the k-medoid PAM algorithms, which
    is also similar to the k-means clustering algorithm. The result is not
    guaranteed to be globally optimal, only locally optimal.

    Notes
    -----
    This placement assumes that all receivers have degree = 1 and are connected
    to an ICR candidate nodes. Also, it assumes that contents are uniformly
    assigned to sources.

    Parameters
    ----------
    topology : Topology
        The topology object
    cache_budget : int
        The cumulative cache budget
    n_cache_nodes : int
        The number of caching nodes to deploy
    hit_ratio : float
        The expected cache hit ratio of a single cache
    weight : str
        The weight attribute

    Raises
    ------
    ValueError
        If there are fewer ICR candidates than requested caches, or if the
        budget split yields zero-sized caches.
    """
    n_cache_nodes = int(n_cache_nodes)
    icr_candidates = topology.graph['icr_candidates']
    if len(icr_candidates) < n_cache_nodes:
        raise ValueError("The number of ICR candidates (%d) is lower than "
                         "the target number of caches (%d)"
                         % (len(icr_candidates), n_cache_nodes))
    elif len(icr_candidates) == n_cache_nodes:
        caches = list(icr_candidates)
        # Receivers have degree 1, so their single neighbour is the
        # designated caching node.
        cache_assignment = {v: list(topology.adj[v].keys())[0]
                            for v in topology.receivers()}
    else:
        # Need to optimally allocate caching nodes.  dict() materializes the
        # generator returned by NetworkX >= 2.0 so it can be indexed.
        distances = dict(nx.all_pairs_dijkstra_path_length(topology, weight=weight))
        # NOTE(review): earlier revisions also built a symmetric cost matrix
        # d[v][u] = distances[v][u] + hit_ratio * (mean distance of u to the
        # sources) but then passed the raw `distances` to compute_p_median,
        # leaving d unused.  The dead O(n^2) computation has been removed;
        # if d was the intended compute_p_median input, restore and pass it.
        allocation, caches, _ = compute_p_median(distances, n_cache_nodes)
        cache_assignment = {v: allocation[list(topology.adj[v].keys())[0]]
                            for v in topology.receivers()}

    cache_size = iround(cache_budget / n_cache_nodes)
    if cache_size == 0:
        raise ValueError("Cache budget is %d but it's too small to deploy it on %d nodes. "
                         "Each node will have a zero-sized cache. "
                         "Set a larger cache budget and try again"
                         % (cache_budget, n_cache_nodes))
    for v in caches:
        topology.node[v]['stack'][1]['cache_size'] = cache_size
    topology.graph['cache_assignment'] = cache_assignment