class BaseStrategy(object):
    """
    Base strategy imported by all other strategy classes
    """

    def __init__(self, topology, log_dir, scenario_id):
        """
        Constructor
        """
        # Create logging objects.
        # When running on clay or the EE lab machines, open the log files in
        # /tmp (local disk) and only compress/move them to the home directory
        # (NFS) once the simulation is over. This avoids frequent NFS writes.
        # Compression may be needed on the EE lab machines because of a 5 GB
        # quota that the logs might exceed.
        self.log_dir = log_dir
        self.scenario_id = scenario_id
        self.link_logger = LinkLogger(path.join(log_dir, 'RESULTS_%s_LINK.txt' % scenario_id))
        self.cache_logger = CacheLogger(path.join(log_dir, 'RESULTS_%s_CACHE.txt' % scenario_id))
        self.delay_logger = DelayLogger(path.join(log_dir, 'RESULTS_%s_DELAY.txt' % scenario_id))
        self.topology = topology
        # calc shortest paths
        self.shortest_path = nx.all_pairs_dijkstra_path(topology, weight='weight')
        # get location of caches and content sources
        self.content_location = {}   # dict of location of contents keyed by content ID
        self.cache_size = {}         # dict of cache sizes keyed by node
        # Link type: internal or external
        self.link_type = dict([((u,v), topology.edge[u][v]['type'])
                               for u in topology.edge
                               for v in topology.edge[u]])
        for node in topology.nodes_iter():
            stack_name, stack_props = get_stack(topology, node)
            if stack_name == 'cache':
                self.cache_size[node] = stack_props['size']
            elif stack_name == 'source':
                contents = stack_props['contents']
                for content in contents:
                    self.content_location[content] = node
        # create actual cache objects
        self.caches = dict([(node, Cache(self.cache_size[node])) for node in self.cache_size])

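    # Usage sketch (illustrative, not part of the original code): subclasses
    # are instantiated with a topology whose nodes carry either a 'cache'
    # stack with a 'size' property or a 'source' stack with a 'contents'
    # property, as read by get_stack() in the constructor above, e.g.
    #
    #   strategy = SomeStrategy(topology, '/tmp/logs', 'scenario-01')
    #   ...  # process events, using self.caches and log_transfer()
    #   strategy.close()
    #
    # SomeStrategy, the log directory and the scenario ID are hypothetical
    # placeholders.
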
    def log_transfer(self, time, origin_node, destination_node, packet_type, content_id):
        """
        Log the transfer of a packet from an origin to a destination node.
        It is assumed that routing is based on Dijkstra shortest paths.
        """
        path = self.shortest_path[origin_node][destination_node]
        for hop in range(1, len(path)):
            u = path[hop - 1]
            v = path[hop]
            self.link_logger.log_link_info(time, u, v, packet_type, content_id, self.link_type[(u,v)])

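    # Illustrative use of log_transfer() (hypothetical values, not part of the
    # original code): a request travelling from a receiver to the node serving
    # the content, followed by the data packet on the reverse path, could be
    # logged as
    #
    #   self.log_transfer(time, receiver, serving_node, 'request', content)
    #   self.log_transfer(time, serving_node, receiver, 'data', content)
    #
    # Each link on the Dijkstra shortest path between the two nodes gets one
    # entry in the link log.
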
    def content_hash(self, content_id):
        """
        Return a hash code of the content ID for hash-routing purposes
        """
        # TODO: This hash function needs revision because it does not return
        # uniformly distributed hash codes
        n_caches = len(self.cache_size)
        hash_code = content_id % n_caches
        # Fold alternate blocks of the content ID space back across the cache
        # set, so consecutive IDs sweep the caches back and forth
        if (content_id // n_caches) % 2 == 0:
            return hash_code
        else:
            return n_caches - hash_code - 1
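
    # Worked example (illustrative): with 4 caches, content IDs 0..7 map to
    # hash codes 0, 1, 2, 3, 3, 2, 1, 0; consecutive IDs sweep the cache set
    # back and forth rather than being hashed uniformly (see the TODO above).
    # A more uniform alternative (a sketch under that assumption, not the
    # method used here) could hash the ID bytes, e.g.:
    #
    #   import hashlib
    #   def content_hash(self, content_id):
    #       digest = hashlib.md5(str(content_id).encode()).hexdigest()
    #       return int(digest, 16) % len(self.cache_size)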
    def assign_caches(self, topology, cache_size, replicas, **kwargs):
        """
        Algorithm that decides how to allocate intervals of the hash
        space to the various caches.

        It returns a dictionary of lists keyed by hash interval ID. Each list
        contains the caches authorized to store the contents hashed to that
        interval. A list is returned also in the case the number of replicas
        is 1.
        """
        # NOTE: this base implementation assigns each hash interval to exactly
        # one cache and ignores the replicas argument
        cache_nodes = list(cache_size.keys())
        return dict([(i, [cache_nodes[i]]) for i in range(len(cache_nodes))])
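
    # Illustrative use (hypothetical, not part of the original code): the
    # mapping returned above can be combined with content_hash() to find the
    # caches responsible for a given content, e.g.
    #
    #   assignment = self.assign_caches(self.topology, self.cache_size, replicas=1)
    #   authorized_caches = assignment[self.content_hash(content_id)]
    #
    # where authorized_caches is a (possibly single-element) list of node IDs.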
        
    def close(self):
        """
        Write summary statistics and close all log files.
        """
        # Add entries to the summary log files
        CacheHitRatioSummary(self.log_dir).write_summary(self.scenario_id, self.cache_logger.cache_hit_ratio())
        NetworkLoadSummary(self.log_dir).write_summary(self.scenario_id, self.link_logger.network_load())
        # Copy log files back to the home folder before exiting
        self.link_logger.close()
        self.cache_logger.close()
        self.delay_logger.close()