Example no. 1
0
def clustered_hashrouting_cache_placement(topology,
                                          cache_budget,
                                          n_clusters,
                                          policy,
                                          distance='delay',
                                          **kwargs):
    """Deploy caching nodes for hashrouting with clusters

    Parameters
    ----------
    topology : Topology
        The topology object
    cache_budget : int
        The cumulative cache budget
    n_clusters : int
        The number of clusters
    policy : str (node_const | cluster_const)
        The cache budget allocation policy: 'node_const' assigns the same
        cache size to every ICR candidate node, while 'cluster_const' splits
        the budget evenly across clusters first, then evenly across the
        nodes of each cluster.
    distance : str
        The attribute used to quantify distance between pairs of nodes.
        Default is 'delay'

    Raises
    ------
    ValueError
        If *n_clusters* is not in [1, number of ICR candidates] or *policy*
        is not a supported value.
    """
    icr_candidates = topology.graph['icr_candidates']
    if n_clusters <= 0 or n_clusters > len(icr_candidates):
        raise ValueError("The number of cluster must be positive and <= the "
                         "number of ICR candidate nodes")
    elif n_clusters == 1:
        # Trivial case: all candidates fall in a single cluster
        clusters = [set(icr_candidates)]
    elif n_clusters == len(icr_candidates):
        # Trivial case: one singleton cluster per candidate, no computation needed
        clusters = [{v} for v in icr_candidates]
    else:
        clusters = compute_clusters(topology,
                                    n_clusters,
                                    distance=distance,
                                    nbunch=icr_candidates,
                                    n_iter=100)
    deploy_clusters(topology, clusters, assign_src_rcv=True)
    if policy == 'node_const':
        # Each node is assigned the same amount of caching space
        cache_size = iround(cache_budget / len(icr_candidates))
        if cache_size == 0:
            return
        for v in icr_candidates:
            topology.node[v]['stack'][1]['cache_size'] = cache_size
    elif policy == 'cluster_const':
        cluster_cache_size = iround(cache_budget / n_clusters)
        for cluster in topology.graph['clusters']:
            # Clusters may also contain non-candidate (src/rcv) nodes, which
            # dilute the per-node share but receive no cache below.
            cache_size = iround(cluster_cache_size / len(cluster))
            if cache_size == 0:
                # Skip zero-size caches, consistent with the node_const policy
                continue
            for v in cluster:
                if v not in icr_candidates:
                    continue
                topology.node[v]['stack'][1]['cache_size'] = cache_size
    else:
        raise ValueError('clustering policy %s not supported' % policy)
Example no. 2
0
def clustered_hashrouting_cache_placement(topology, cache_budget, n_clusters,
                            policy, distance='delay', **kwargs):
    """Place caches on clustered ICR candidate nodes for hashrouting.

    Parameters
    ----------
    topology : Topology
        Topology on which caches are deployed
    cache_budget : int
        Total caching space to distribute among candidate nodes
    n_clusters : int
        Number of clusters to form
    policy : str (node_const | cluster_const)
        Cache budget allocation policy
    distance : str
        Edge attribute used as inter-node distance (default 'delay')
    """
    candidates = topology.graph['icr_candidates']
    n_candidates = len(candidates)
    # n_clusters must lie in [1, n_candidates]
    if not 0 < n_clusters <= n_candidates:
        raise ValueError("The number of cluster must be positive and <= the "
                         "number of ICR candidate nodes")
    if n_clusters == n_candidates:
        # Degenerate case: every candidate is its own cluster
        clusters = [{node} for node in candidates]
    elif n_clusters == 1:
        # Degenerate case: a single cluster holding all candidates
        clusters = [set(candidates)]
    else:
        clusters = compute_clusters(topology, n_clusters, distance=distance,
                                    nbunch=candidates, n_iter=100)
    deploy_clusters(topology, clusters, assign_src_rcv=True)
    if policy == 'node_const':
        # Spread the budget uniformly over all candidate nodes
        size = iround(cache_budget / n_candidates)
        if size == 0:
            return
        for node in candidates:
            topology.node[node]['stack'][1]['cache_size'] = size
    elif policy == 'cluster_const':
        # Split the budget per cluster, then per node within each cluster
        per_cluster = iround(cache_budget / n_clusters)
        for cluster in topology.graph['clusters']:
            size = iround(per_cluster / len(cluster))
            for node in cluster:
                # Only candidate nodes actually receive a cache
                if node in candidates:
                    topology.node[node]['stack'][1]['cache_size'] = size
    else:
        raise ValueError('clustering policy %s not supported' % policy)
Example no. 3
0
def clustered_hashrouting_cache_placement(topology,
                                          cache_budget,
                                          n_clusters,
                                          policy,
                                          distance='delay',
                                          **kwargs):
    """Deploy caching nodes for hashrouting with clusters

    Parameters
    ----------
    topology : Topology
        The topology object
    cache_budget : int
        The cumulative cache budget
    n_clusters : int
        The number of clusters
    policy : str (node_const | cluster_const)
        The cache budget allocation policy: 'node_const' assigns the same
        cache size to every ICR candidate node, while 'cluster_const' splits
        the budget evenly across clusters first, then evenly across the
        nodes of each cluster.
    distance : str
        The attribute used to quantify distance between pairs of nodes.
        Default is 'delay'

    Raises
    ------
    ValueError
        If *n_clusters* is not in [1, number of ICR candidates] or *policy*
        is not a supported value.

    References
    ----------
    .. [1] L. Saino, I. Psaras and G. Pavlou, Framework and Algorithms for
           Operator-managed Content Caching, in IEEE Transactions on
           Network and Service Management (TNSM), Volume 17, Issue 1, March 2020
           https://doi.org/10.1109/TNSM.2019.2956525
    .. [2] L. Saino, On the Design of Efficient Caching Systems, Ph.D. thesis
           University College London, Dec. 2015. Available:
           http://discovery.ucl.ac.uk/1473436/
    """
    icr_candidates = topology.graph['icr_candidates']
    if n_clusters <= 0 or n_clusters > len(icr_candidates):
        raise ValueError("The number of cluster must be positive and <= the "
                         "number of ICR candidate nodes")
    elif n_clusters == 1:
        # Trivial case: all candidates fall in a single cluster
        clusters = [set(icr_candidates)]
    elif n_clusters == len(icr_candidates):
        # Trivial case: one singleton cluster per candidate, no computation needed
        clusters = [set([v]) for v in icr_candidates]
    else:
        clusters = compute_clusters(topology,
                                    n_clusters,
                                    distance=distance,
                                    nbunch=icr_candidates,
                                    n_iter=100)
    deploy_clusters(topology, clusters, assign_src_rcv=True)
    if policy == 'node_const':
        # Each node is assigned the same amount of caching space
        cache_size = iround(cache_budget / len(icr_candidates))
        if cache_size == 0:
            # Budget too small to give every node a cache; deploy nothing
            return
        for v in icr_candidates:
            topology.node[v]['stack'][1]['cache_size'] = cache_size
    elif policy == 'cluster_const':
        # Same total budget per cluster, shared among the cluster's nodes
        cluster_cache_size = iround(cache_budget / n_clusters)
        for cluster in topology.graph['clusters']:
            # NOTE(review): len(cluster) may include non-candidate (src/rcv)
            # nodes assigned by deploy_clusters, shrinking the per-node share
            # even though only candidates get a cache below — confirm intended.
            cache_size = iround(cluster_cache_size / len(cluster))
            for v in cluster:
                if v not in icr_candidates:
                    # Only ICR candidate nodes receive caching space
                    continue
                topology.node[v]['stack'][1]['cache_size'] = cache_size
    else:
        raise ValueError('clustering policy %s not supported' % policy)