    def __init__(self, *args, **kwargs):
        super(TrafficGenerator, self).__init__(*args, **kwargs)

        self.scheduler = sched.scheduler(time.time, time.sleep)
        self.db = DatabaseHandler()
        self.thread_handlers = []

        # IP of the Load Balancer Controller host.
        try:
            self._lbc_ip = ipaddress.ip_interface(self.db.getIpFromHostName(dconf.LBC_Hostname)).ip.compressed
        except Exception:
            log.info("WARNING: Load balancer controller could not be found in the network\n")
            self._lbc_ip = None
    def __init__(self, capacity_graph, lock, logfile, median_filter=False, interval=1.01):
        super(LinksMonitorThread, self).__init__()
        # Read network database
        self.db = DatabaseHandler()

        # Lock object to access capacity graph
        self.lock = lock

        # Counters read interval
        self.interval = interval

        # Capacity graph object
        self.cg = capacity_graph

        # Dictionary that holds the binding between router id and the
        # router ip in the control network
        self.ip_to_control = {}
        
        # Start router counters
        self.counters = self._startCounters()

        # Perform median filter or not?
        self.median_filter = median_filter
        
        # Start router-to-router links
        self.links = self._startLinks()

        # Used internally for the logs
        self.link_to_edge_bindings = self._createLinkToEdgeBindings()

        # Set log file
        if logfile:
            self.logfile = logfile
            # Write first line with links
            with open(self.logfile, 'w') as f:
                f.write(self.printLinkToEdgesLine(self.cg))
        else:
            self.logfile = None
class LinksMonitor(object):
    """
    Implements the class.
    """
    def __init__(self, interval=1, logfile=dconf.LinksMonitor_LogFile):
        self.db = DatabaseHandler() 
        log.info("LINKS MONITOR -- interval: %s -- logfile: %s --\n"%(str(interval),logfile))
        log.info("-"*60+"\n")
        log.info("Read all edges from network...\n")
        self.links = self.db.getAllEdges()
        self.interval = interval
        log.info("Start all counters...\n")
        self.counters = self._startCounters()
        self.logfile = logfile
        log.info("%s\n"%self.printLinksToEdges())

    def printLinksToEdges(self):
        s = "Links to edges:\n"
        taken = []
        for link, data in self.links.iteritems():
            (x, y) = data['edge']
            if (x,y) in taken or (y,x) in taken:
                continue
            if not('r' in x and 'r' in y):
                continue
            taken.append((x,y))
            s += link+' -> '+str(data['edge'])+'\n'
        s += '\n\n'
        return s

    def printLinkToEdgesLine(self):
        s = ""
        taken = []
        for link, data in self.links.iteritems():
            (x, y) = data['edge']
            if (x,y) in taken or (y,x) in taken:
                continue
            taken.append((x,y))
            s += link+'->(%s %s),'%(x,y)
        s += '\n'
        return s
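    # Example of the line produced above (hypothetical interface and
    # edge names):
    # r1-eth0->(r1 r2),r2-eth1->(r2 r3),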
    
    def __str__(self):
        s = ""
        taken = []
        for link, data in self.links.iteritems():
            (x, y) = data['edge']
            if (x,y) in taken or (y,x) in taken:
                continue
            taken.append((x,y))
            s += "%s %s -> load: (%.2f%%)\n"%(link, data['edge'], (100*data['load'])/data['bw'])
        s += '\n'
        return s    

    def log(self):
        """This function logs the state of the links. f is supposed to be an
        open python file with write access

        """
        f = open(self.logfile, 'a')
        s = "%s"%time.time()
        taken = []
        for link, data in self.links.iteritems():
            (x, y) = data['edge']
            if (x,y) in taken or (y,x) in taken:
                continue
            taken.append((x,y))
            load = data['load']
            s += ",(%s %.3f%%)"%(link, load)
        s += '\n'
        f.write(s)    
        f.close()

    def run(self):
        """Main loop: periodically updates the link loads from the
        counters and logs them to the log file.
        """
        log.info("Going inside the run() loop...\n")
        # Write edges info to log file (first line)
        f = open(self.logfile, 'a')
        f.write(self.printLinkToEdgesLine())
        f.close()

        while True:
            # Update links with fresh data from the counters
            self.updateLinks()
            #log.info("Links updated...\n")

            # Log new values to logfile
            #log.info("Logging...\n")
            self.log()
            
            # Go to sleep for some interval time
            #log.info("Going to sleep...\n")
            time.sleep(self.interval/2.0)

    def _startCounters(self):
        start = time.time()
        routers = self.db.getRouters()
        counters_dict = {name:{'routerid':rid, 'counter': SnmpCounters(routerIp=rid)} for name, rid in routers}
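        # For reference, counters_dict maps each router name to its id
        # and SNMP counter object, e.g. (hypothetical name and id):
        # {'r1': {'routerid': '10.0.0.1', 'counter': <SnmpCounters>}, ...}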
        if time_info:
            log.info("linksmonitor.py: _startCounters() took %d seconds\n"%(time.time()-start))
        return counters_dict

    def _updateCounters(self):
        """Reads all counters of the routers in the network. Blocks until the
        counters have been updated.
        """
        start = time.time()
        for r, data in self.counters.iteritems():
            counter = data['counter']
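            # Busy-wait until at least self.interval seconds have
            # passed since this counter's last reading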
            while (counter.fromLastLecture() < self.interval):
                pass
            counter.updateCounters32()
        if time_info:
            log.info("linksmonitor.py: _updateCounters() took %d seconds\n"%(time.time()-start))

    def _setLinkLoad(self, iface_name, load):
        name = [name for name, data in self.links.iteritems() if
                data['interface'] == iface_name]
        if name == []:
            # Interface not found in the links table: nothing to update
            return
        self.links[name[0]]['load'] = load
        
    def updateLinks(self):
        # Update the counters first
        start = time.time()
        self._updateCounters()
        log.info("%s\n"%str(self.links))
        # Iterate the counters
        for name, data in self.counters.iteritems():
            # Get the counter object for each router
            counter = data['counter']
            # Get the router id for the counter
            routerid = counter.routerIp
            # Get ifaces name and load for each router interface
            iface_names = [data['name'] for data in counter.interfaces]
            loads = counter.getLoads()
            elapsed_time = counter.timeDiff

            bandwidths = []
            for ifacename in iface_names:
                bw_tmp= [d['bw'] for link, d in
                         self.links.iteritems() if d['interface']
                         == ifacename]
                if bw_tmp != []:
                    bandwidths.append(bw_tmp[0])
                    
            bandwidths = np.asarray(bandwidths)
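            # Link utilization (%) = counted bytes / (bandwidth *
            # elapsed time) * 100, assuming 'bw' is expressed in the
            # same bytes-per-second units as the counter loads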
            currentPercentages = np.multiply(loads/(np.multiply(bandwidths, elapsed_time)), 100)
            #log.info("Elapsed time: %s\n"%elapsed_time)
            #log.info("Loads: %s\n"%str(loads))
            #log.info("Bws: %s\n"%str(bandwidths))
            
            # Set link loads by interface name
            for i, iface_name in enumerate(iface_names):
                iface_load = currentPercentages[i]
                self._setLinkLoad(iface_name, iface_load)

        if time_info:
            log.info("linksmonitor.py: updateLinks() took %d seconds\n"%(time.time()-start))
class LBController(object):
    def __init__(self, congestionThreshold=0.95):
        """It basically reads the network topology from the MyGraphProvider,
        which is running in another thread because
        SouthboundManager.run() is blocking.
        
        Here we are assuming that the topology does not change.
        """
        # Dictionary that keeps the allocation of the flows in the network paths
        self.flow_allocation = {} 
        # {prefixA: {flow1 : [path_list], flow2 : [path_list]},
        #  prefixB: {flow4 : [path_list], flow3 : [path_list]}}

        # Lock to make flow_allocation thread-safe
        self.flowAllocationLock = threading.Lock()
        
        # From where to read events 
        self.eventQueue = eventQueue
        
        # Used to schedule flow alloc. removals
        self.thread_handlers = {} 

        # Data structure that holds the current forwarding dags for
        # all advertised destinations in the network
        self.dagsLock = threading.Lock()
        self.dags = {}

        # Set the congestion threshold
        self.congestionThreshold = congestionThreshold
        t = time.strftime("%H:%M:%S", time.gmtime())
        log.info("%s - Congestion Threshold is set to %.2f%% of the link\n"%(t, (self.congestionThreshold)*100.0))
        
        # Used to stop the thread
        self._stop = threading.Event() 

        # Object that handles the topology database
        self.db = DatabaseHandler()
    
        # Connects to the southbound controller. Must be called before
        # create instance of SouthboundManager
        CFG.read(dconf.C1_Cfg) 

        # Start the Southbound manager in a different thread.    
        self.sbmanager = MyGraphProvider()
        t = threading.Thread(target=self.sbmanager.run, name="Graph Listener")
        t.start()
        t = time.strftime("%H:%M:%S", time.gmtime())
        log.info("%s - Graph Listener thread started\n"%t)

        # Blocks until initial graph arrived notification is received
        # from southbound manager
        HAS_INITIAL_GRAPH.wait() 
        t = time.strftime("%H:%M:%S", time.gmtime())
        log.info("%s - Initial graph received\n"%t)

        # Retrieve network graph from southbound manager
        self.network_graph = self.sbmanager.igp_graph

        # Maintains the list of the network prefixes advertised by the OSPF routers
        self.ospf_prefixes = self._fillInitialOSPFPrefixes()
        t = time.strftime("%H:%M:%S", time.gmtime())
        log.info("%s - Initial OSPF prefixes read\n"%t)
        
        # Include BW data inside the initial graph.
        n_router_links = self._countRouter2RouterEdges()
        self._readBwDataFromDB()
        i = 0
        while not self._bwInAllRouterEdges(n_router_links):
            i += 1
            time.sleep(1)
            self._readBwDataFromDB()            
        t = time.strftime("%H:%M:%S", time.gmtime())
        log.info("%s - Bandwidths written in network_graph after %d iterations\n"%(t,i))

        # Read the initial graph. We keep this as a copy of the
        # physical topology. In initial graph, the instantaneous
        # capacities of the links are kept.
        self.initial_graph = self.network_graph.copy()
        
        t = time.strftime("%H:%M:%S", time.gmtime())
        log.info("%s - Created IP-names bindings\n"%t)
        log.info("\tHostname\tip\tsubnet\n")
        for name, data in self.db.hosts_to_ip.iteritems():
            log.info("\t%s\t%s\t%s\n"%(name, data['iface_host'], data['iface_router']))

        log.info("\tRouter name\tip\t\n")
        for name, ip in self.db.routers_to_ip.iteritems():
            log.info("\t%s\t%s\n"%(name, ip))

        # Create here the initial DAGS for each destination in the
        # network
        self._createInitialDags()
        t = time.strftime("%H:%M:%S", time.gmtime())
        log.info("%s - Initial DAGS created\n"%t)

        # Spawn Json listener thread
        jl = JsonListener(self.eventQueue)
        jl.start()
        t = time.strftime("%H:%M:%S", time.gmtime())
        log.info("%s - Json listener thread created\n"%t)

        # Create attributes
        self.feedbackRequestQueue = feedbackRequestQueue
        self.feedbackResponseQueue = feedbackResponseQueue
        
        # Dict in which we save flows pending for allocation feedback
        self.pendingForFeedback = {}

        # Spawn FeedbackThread
        ft = feedbackThread(self.feedbackRequestQueue, self.feedbackResponseQueue)
        ft.start()
        t = time.strftime("%H:%M:%S", time.gmtime())
        log.info("%s - feedbackThread started\n"%t)
        
        
    def run(self):
        """Main loop that deals with new incoming events
        """
        while not self.isStopped():
            # Get event from the queue (blocking)
            event = self.eventQueue.get()
            log.info(lineend)
            t = time.strftime("%H:%M:%S", time.gmtime())
            log.info("%s - run(): NEW event in the queue\n"%t)
            log.info("\t* Type: %s\n"%event['type'])
            
            if event['type'] == 'newFlowStarted':
                # Fetch flow from queue
                flow = event['data']
                log.info("\t* Flow: %s\n"%self.toLogFlowNames(flow))

                # We assume that upon dealing with a new flow, the
                # self.dags is not accessed by any other thread
                with self.dagsLock:
                    with self.flowAllocationLock:
                        # Deal with new flow
                        self.dealWithNewFlow(flow)
                
            else:
                t = time.strftime("%H:%M:%S", time.gmtime())
                log.info("%s - run(): UNKNOWN Event\n"%t)
                log.info("\t* Event: "%str(event))

    def dealWithNewFlow(self, flow):
        """Called when a new flow arrives. This method should be overwritten
        by each of the subclasses performing the various algorithms.

        When this function is called, no algorithm to allocate flows
        is called. The LBController only keeps track of the default
        allocations of the flows.
        """
        # In general, this won't be True that often...
        ecmp = False
        
        # Get the communicating interfaces
        src_iface = flow['src']
        dst_iface = flow['dst']

        # Get host ip's
        src_ip = src_iface.ip
        dst_ip = dst_iface.ip

        # Get their corresponding networks
        src_network = src_iface.network
        dst_network = self.getCurrentOSPFPrefix(dst_iface.compressed)

        # Get the string-type prefixes        
        src_prefix = src_network.compressed
        dst_prefix = dst_network.compressed

        # Get the current path from source to destination
        currentPaths = self.getActivePaths(src_iface, dst_iface, dst_prefix)

        if len(currentPaths) > 1:
            # ECMP is happening
            ecmp = True
            t = time.strftime("%H:%M:%S", time.gmtime())
            log.info("%s - dealWithNewFlow(): ECMP is ACTIVE\n"%t)
        elif len(currentPaths) == 1:
            ecmp = False
            t = time.strftime("%H:%M:%S", time.gmtime())
            log.info("%s - dealWithNewFlow(): ECMP is NOT active\n"%t)
        else:
            t = time.strftime("%H:%M:%S", time.gmtime())
            log.info("%s - dealWithNewFlow(): ERROR: no active paths found\n"%t)

        # Detect if flow is going to create congestion
        if self.canAllocateFlow(flow, currentPaths):
            t = time.strftime("%H:%M:%S", time.gmtime())
            log.info("%s - dealWithNewFlow(): Flow can be ALLOCATED\n"%t)

        else:
            t = time.strftime("%H:%M:%S", time.gmtime())
            log.info("%s - dealWithNewFlow(): Flow will cause CONGESTION\n"%t)

        # We just allocate the flow to the currentPaths
        self.addAllocationEntry(dst_prefix, flow, currentPaths)
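    # A minimal sketch of how a subclass would plug in its own
    # allocation algorithm (hypothetical class and helper, for
    # illustration only):
    #
    #   class GreedyLBController(LBController):
    #       def dealWithNewFlow(self, flow):
    #           dst_prefix = self.getCurrentOSPFPrefix(flow['dst'].compressed).compressed
    #           paths = self.getActivePaths(flow['src'], flow['dst'], dst_prefix)
    #           if not self.canAllocateFlow(flow, paths):
    #               paths = self.computeAlternativePaths(flow)  # hypothetical helper
    #           self.addAllocationEntry(dst_prefix, flow, paths)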

    def stop(self):
        """Stop the LBController correctly
        """
        #Here we should deal with the handlers of the spawned threads
        #and subprocesses...
        self._stop.set()
   
    def isStopped(self):
        """Check if LBController is set to be stopped or not
        """
        return self._stop.isSet()
  
    def _readBwDataFromDB(self):
        """Introduces BW data from /tmp/db.topo into the network DiGraph and
        sets the capacity to the link bandwidth.
        """
        for (x, y, data) in self.network_graph.edges(data=True):
            if 'C' in x or 'C' in y: # means it is the controller...
                continue
            
            if self.network_graph.is_router(x) and self.network_graph.is_router(y):
                # Fill edges between routers only!
                xname = self.db.getNameFromIP(x)
                yname = self.db.getNameFromIP(y)
                if xname and yname:
                    try:
                        bw = self.db.interface_bandwidth(xname, yname)
                        data['bw'] = int(bw*1e6)
                        data['capacity'] = int(bw*1e6)
                        data['mincap'] = (1-self.congestionThreshold)*(bw*1e6)
                    except Exception:
                        t = time.strftime("%H:%M:%S", time.gmtime())
                        to_print = "%s - _readBwDataFromDB(): EXCEPTION reading bandwidth for edge %s (%s) -> %s (%s)\n"
                        log.info(to_print%(t, x, xname, y, yname))
                else:
                    t = time.strftime("%H:%M:%S", time.gmtime())
                    log.info("%s - _readBwDataFromDB(): ERROR: did not find %s (%s) and %s (%s)\n"%(t, x, xname, y, yname))

                
    def _countRouter2RouterEdges(self):
        """
        Counts how many unidirectional links between routers exist in the network
        """
        routers = [n for (n, data) in self.db.network.iteritems() if data['type'] == 'router']
        edges_count = 0
        for r in routers:
            data = self.db.network[r]
            for n, d in data.iteritems():
                if type(d) == dict:
                    try:
                        self.db.routerid(n)
                    except TypeError:
                        pass
                    else:
                        edges_count += 1
        return edges_count

    def _countWrittenBw(self):
        ep = [1 if 'capacity' in data.keys() and 'bw' in
              data.keys() else 0 for (x, y, data) in
              self.network_graph.edges(data=True) if
              self.network_graph.is_router(x) and
              self.network_graph.is_router(y)]
        return sum(ep)

    def _bwInAllRouterEdges(self, n_router_links):
        current_count = self._countWrittenBw()
        return current_count == n_router_links and current_count != 0

    def _fillInitialOSPFPrefixes(self):
        """
        Fills up the data structure
        """
        prefixes = []
        for prefix in self.network_graph.prefixes:
            prefixes.append(ipaddress.ip_network(prefix))
        return prefixes

    def getCurrentOSPFPrefix(self, interface_ip):
        """Given a interface ip address of a host in the mininet network,
        returns the longest prefix currently being advertised by the
        OSPF routers.

        :param interface_ip: string representing a host's interface ip
                             address. E.g: '192.168.233.254/30'

        Returns: an ipaddress.IPv4Network object
        """
        iface = ipaddress.ip_interface(interface_ip)
        iface_nw = iface.network
        iface_ip = iface.ip
        longest_match = (None, 0)
        for prefix in self.ospf_prefixes:
            prefix_len = prefix.prefixlen
            if iface_ip in prefix and prefix_len > longest_match[1]:
                longest_match = (prefix, prefix_len)
        return longest_match[0]
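    # A minimal example of the longest-prefix match above (hypothetical
    # prefixes):
    #
    #   self.ospf_prefixes = [ipaddress.ip_network(u'192.168.0.0/16'),
    #                         ipaddress.ip_network(u'192.168.233.0/24')]
    #   self.getCurrentOSPFPrefix(u'192.168.233.254/30')
    #   -> IPv4Network(u'192.168.233.0/24')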

    def getCurrentDag(self, dst):
        """
        Returns a copy of the current DAG towards destination
        """
        return self.dags[dst].copy()

    def getInitialDag(self, dst):
        currentDag = self.getCurrentDag(dst)
        initialDag = currentDag.copy()
        
        # set fibbed edges to not active and default ones to active
        for (u, v, data) in currentDag.edges(data=True):
            if data['fibbed'] == True:
                initialDag.get_edge_data(u,v)['active'] = False
            else:
                initialDag.get_edge_data(u,v)['active'] = True
        return initialDag
        
    def setCurrentDag(self, dst, dag):
        """
        Sets the current DAG towards destination
        """
        self.dags[dst] = dag
        
    def getActiveEdges(self, dag, node):
        activeEdges = []
        for n, data in dag[node].iteritems():
            if data['active'] == True:
                activeEdges.append((node, n))
        return activeEdges

    def getFibbedEdges(self, dag, node):
        """
        Returns the fibbed edges in the
        """
        fibbedEdges = []
        for n, data in dag[node].iteritems():
            if data['fibbed'] == True:
                fibbedEdges.append((node, n))
        return fibbedEdges

    def getDefaultEdges(self, dag, node):
        """Returns the list of edges from node that are used by default in
        OSPF"""
        defaultEdges = []
        for n, data in dag[node].iteritems():
            if data['fibbed'] == False:
                defaultEdges.append((node, n))
        return defaultEdges

    def switchDagEdgesData(self, dag, path_list, **kwargs):
        """Sets the data of the edges in path_list to the attributes expressed
        in kwargs.

        :param dag: nx.DiGraph representing the dag of the destination
                    subnet that we want to change the edges state.

        :param path_list: list of paths. E.g: [[A,B,C],[A,G,C]...]

        :param **kwargs: Edge attributes to be set.

        """
        # Check first if we have a path list or an edge list
        if path_list != [] and isinstance(path_list[0], tuple):
            # We have an edge list already
            edge_list = path_list

        elif path_list != [] and isinstance(path_list[0], list):
            # We have a path list
            edge_list = self.getEdgesFromPathList(path_list)

        else:
            # Empty path_list or unrecognized format: nothing to switch
            edge_list = []

        if edge_list != []:
            for (u,v) in edge_list:
                if (u,v) not in dag.edges():
                    # The initial edges will never get the fibbed
                    # attribute set to True, since they exist in the dag
                    # from the beginning.
                    dag.add_edge(u,v)
                    dag.get_edge_data(u,v)['fibbed'] = True

                # Do for all edges
                edge_data = dag.get_edge_data(u,v)
                for key, value in kwargs.iteritems():
                    edge_data[key] = value

        # Return modified dag when finished
        return dag
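    # A minimal usage sketch (hypothetical router ids): mark the edges
    # of two ECMP paths as carrying traffic in the destination's dag:
    #
    #   dag = self.getCurrentDag(dst_prefix)
    #   dag = self.switchDagEdgesData(dag, [['A','B','C'], ['A','D','C']],
    #                                 ongoing_flows=True)
    #   self.setCurrentDag(dst_prefix, dag)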

    def getActiveDag(self, dst):
        """Returns the DAG being currently deployed in practice for the given
        destination.
        """
        dag = self.dags[dst]
        active_dag = dag.copy()
        # Collect the inactive edges first, then remove them, so that
        # we do not modify the graph while iterating over its edges
        inactive_edges = [(u, v) for (u, v, data) in
                          active_dag.edges(data=True) if data['active'] == False]
        active_dag.remove_edges_from(inactive_edges)
        return active_dag
    
    def getActivePaths(self, src_iface, dst_iface, dst_prefix):
        """Returns the current active path between two host interface ips and
        the destination prefix for which we want to retrieve the
        current active path.
        
        :param src_iface, dst_iface: ipaddress.ip_interface objects

        :param dst_prefix: string representing the destination prefix
                           (i.e: 192.168.225.0/25).
        """
        # Get current active DAG for that destination
        active_dag = self.getActiveDag(dst_prefix)

        # Get src_iface and dst_iface attached routers
        routers = list(self.network_graph.routers)
        src_rid = None
        dst_rid = None

        for r in routers:
            if self.network_graph.has_successor(r, src_iface.network.compressed):
                d = src_iface.network.compressed
                if self.network_graph[r][d]['fake'] == False:
                    src_rid = r

            if self.network_graph.has_successor(r, dst_iface.network.compressed):
                d = dst_iface.network.compressed
                if self.network_graph[r][d]['fake'] == False:
                    dst_rid = r
                    
        if src_rid and dst_rid:
            # Calculate path and return it
            active_paths = self._getAllPathsLimDAG(active_dag, src_rid, dst_rid, 0)
            return active_paths
        else:
            t = time.strftime("%H:%M:%S", time.gmtime())
            to_print = "%s - getActivePaths(): No paths could be found between %s and %s for subnet prefix %s\n"
            log.info(to_print%(t, str(src_iface), str(dst_iface), dst_prefix))
            return [[]]
        
    def _createInitialDags(self):
        """Populates the self.dags attribute by creating a complete DAG for
        each destination. In other words, a DAG representing
        all-shortest paths from any router in the network towards each
        destination.
        """

        # Log it
        t = time.strftime("%H:%M:%S", time.gmtime())
        log.info("%s - Creating initial DAGs\n"%t)
        pairs_already_logged = []
                           
        apdp = nx.all_pairs_dijkstra_path(self.initial_graph, weight='metric')
        
        for prefix in self.network_graph.prefixes:
            dag = nx.DiGraph()

            # Get IP of the connected router
            cr = [r for r in self.network_graph.routers if self.network_graph.has_successor(r, prefix)][0]
            
            # Get subnet prefix
            subnet_prefix = prefix
            
            other_routers = [rn for rn in self.network_graph.routers if rn != cr]

            for r in other_routers:
                
                # Get the shortest path
                dpath = apdp[r][cr]
                
                # Are there possibly more paths with the same cost? Let's check:
                # Get length of the default dijkstra shortest path
                dlength = self.getPathLength(dpath)

                # Get all paths with length equal to the default path length
                default_paths = self._getAllPathsLim(self.initial_graph, r, cr, dlength)

                if len(default_paths) > 1:
                    # ECMP is happening
                    ecmp = True
                    if (cr, r) not in pairs_already_logged and (r, cr) not in pairs_already_logged:
                        to_print = "\tECMP is ACTIVE between %s and %s. There are %d paths with equal cost of %d\n"
                        log.info(to_print%(self.db.getNameFromIP(cr), self.db.getNameFromIP(r), len(default_paths), dlength))
                        pairs_already_logged.append((cr, r))
                    
                elif len(default_paths) == 1:
                    ecmp = False
                    default_paths = [dpath]

                else:
                    t = time.strftime("%H:%M:%S", time.gmtime())
                    log.info("%s - _createInitialDags(): ERROR. At least there should be a path\n"%t)
                    
                # Iterate through paths and add edges to DAG
                for path in default_paths:
                    edge_list = zip(path[:-1], path[1:])
                    for (u,v) in edge_list:
                        if self.network_graph.is_router(u) and self.network_graph.is_router(v):
                            dag.add_edge(u,v)
                            edge_data = dag.get_edge_data(u,v)
                            edge_data['active'] = True
                            edge_data['fibbed'] = False
                            edge_data['default'] = True
                            edge_data['ongoing_flows'] = False

            # Add DAG to prefix
            self.dags[subnet_prefix] = dag
    
    def getEdgesFromPathList(self, path_list):
        """Given a list of paths, returns a list of all the edges contained
        in these paths.
        """
        edge_list = []
        for path in path_list:
            edge_list += zip(path[:-1], path[1:])
        return edge_list
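    # e.g. (hypothetical nodes): [['A','B','C'], ['A','D','C']]
    #      -> [('A','B'), ('B','C'), ('A','D'), ('D','C')]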

    def isFibbed(self, dst_prefix):
        """Returns true if there exist fake LSA for that prefix in the
        network.

        TODO: probably must be changed...
        
        """
        return (self.getLiesFromPrefix(dst_prefix) != [])

    def isFibbedPath(self, dst_prefix, path):
        """Returns True if it finds a fibbed edge active along the path in
        dst_prefix DAG
        """
        currentDag = self.getCurrentDag(dst_prefix)        
        for (u,v) in zip(path[:-1], path[1:]):
            edge_data = currentDag.get_edge_data(u,v)
            if edge_data['fibbed'] == True and edge_data['active'] == True:
                # Fake edge found
                return True
        return False

    def addAllocationEntry(self, prefix, flow, path_list):
        """Add entry in the flow_allocation table.
        
        :param prefix: destination prefix. Expressed as an
                       IPv4Interface object
        
        :param path_list: List of paths for which this flow will be
                          multi-pathed towards destination prefix:
                          [[A, B, C], [A, D, C]]"""

        if prefix not in self.flow_allocation.keys():
            # prefix not in table
            self.flow_allocation[prefix] = {flow : path_list}
        else:
            self.flow_allocation[prefix][flow] = path_list
            
        # Logging a bit...
        t = time.strftime("%H:%M:%S", time.gmtime())
        to_print = "%s - flow ALLOCATED to Paths\n"
        log.info(to_print%t)
        log.info("\t* Dest_prefix: %s\n"%prefix)
        log.info("\t* Paths (%s): %s\n"%(len(path_list), str([self.toLogRouterNames(path) for path in path_list])))
        log.info("\t* Flow: %s\n"%self.toLogFlowNames(flow))
                        
        # Current dag for destination
        current_dag = self.getCurrentDag(prefix)
        
        # Iterate the paths
        for path in path_list:
            # Calculate paths with only routers
            path_only_routers = [p for p in path if self.network_graph.is_router(p)]

            # Extract the edges of the path
            edges = zip(path_only_routers[:-1], path_only_routers[1:])
            
            # Modify first the current destination dag: ongoing_flows = True
            current_dag = self.switchDagEdgesData(current_dag, edges, ongoing_flows=True)
                        
        # Set the current dag
        self.setCurrentDag(prefix, current_dag)

        # Define the removeAllocationEntry thread
        t = threading.Thread(target=self.removeAllocationEntry, args=(prefix, flow))
        # Start the thread
        t.start()
        # Add handler to list and start thread
        self.thread_handlers[flow] = t
   
    def removeAllocationEntry(self, prefix, flow):
        """
        Removes the flow from the allocation entry prefix and restores the corresponding.
        """
        # Wait until flow finishes
        time.sleep(flow['duration']) 
        
        # Acquire locks for self.flow_allocation and self.dags
        # dictionaries
        self.flowAllocationLock.acquire()
        self.dagsLock.acquire()
        
        log.info(lineend)
        if prefix not in self.flow_allocation.keys():
            # prefix not in table
            raise KeyError("The is no such prefix allocated: %s"%str(prefix))
        else:
            if flow in self.flow_allocation[prefix].keys():
                path_list = self.flow_allocation[prefix].pop(flow, None)
            else:
                raise KeyError("%s is not alloacated in this prefix %s"%str(repr(flow)))

        t = time.strftime("%H:%M:%S", time.gmtime())
        log.info("%s - Flow REMOVED from Paths\n"%t)
        log.info("\t* Dest_prefix: %s\n"%prefix)
        log.info("\t* Paths (%s): %s\n"%(len(path_list), str([self.toLogRouterNames(path) for path in path_list])))
        log.info("\t* Flow: %s\n"%self.toLogFlowNames(flow))

        # Check first how many ECMP paths are there
        ecmp_paths = float(len(path_list))

        # Current dag for destination
        current_dag = self.getCurrentDag(prefix)
        
        # Get the active Dag
        activeDag = self.getActiveDag(prefix)
        log.info("\t* removeAllocationEntry: initial DAG\n\t  %s\n"%str(self.toLogDagNames(activeDag).edges()))
        
        # Get current remaining allocated flows for destination
        remaining_flows = self.getAllocatedFlows(prefix)

        # Accumulate edges for which there are flows ongoing
        ongoing_edge_list = []
        for (f, f_path_list) in remaining_flows:
            ongoing_edge_list += self.getEdgesFromPathList(f_path_list)
                
        # Iterate the path_list
        for path in path_list:
            # Get paths with only routers
            path_only_routers = [p for p in path if self.network_graph.is_router(p)]
            
            # Calculate edges of the path
            edges = zip(path_only_routers[:-1], path_only_routers[1:])
            
            # Calculate which of these edges can be set to ongoing_flows = False
            edges_without_flows = [(u, v) for (u, v) in edges if (u, v) not in ongoing_edge_list]
            
            # Set them
            current_dag = self.switchDagEdgesData(current_dag, edges_without_flows, ongoing_flows=False)
            
        # Set the new calculated dag to its destination prefix dag
        self.setCurrentDag(prefix, current_dag)
        
        # Remove the lies for the given prefix
        self.removePrefixLies(prefix, path_list)

        # Release locks
        self.flowAllocationLock.release()
        self.dagsLock.release()
        
        
    def removePrefixLies(self, prefix, path_list):
        """Remove lies for a given prefix only if there are no more flows
        allocated for that prefix flowing through some edge of
        path_list.

        :param prefix: subnet prefix

        :param path_list: List of paths from source to
                          destination. E.g: [[A,B,C],[A,D,C]]
        """
        # log a bit
        t = time.strftime("%H:%M:%S", time.gmtime())
        log.info("%s - Removing existing lies...\n"%t)

        # Get the current DAG for that prefix
        current_dag = self.getCurrentDag(prefix)

        # Get the active Dag
        activeDag = self.getActiveDag(prefix)

        to_log = "\t* removePrefixLies: initial DAG\n\t  %s\n"
        log.info(to_log%str(self.toLogDagNames(activeDag).edges()))

        # Check if fibbed edge in paths
        thereIsFibbedPath = False
        for path in path_list:
            thereIsFibbedPath = thereIsFibbedPath or self.isFibbedPath(prefix, path)

        if not thereIsFibbedPath:
            # Paths for this flow are not fibbed
            to_print = "\t* No fibbed edges found in paths %s for prefix: %s\n"
            log.info(to_print%(str(self.toLogRouterNames(path_list)), prefix))

        else:
            # Get the lies for prefix
            lsa = self.getLiesFromPrefix(prefix)
            
            to_log = "\t* Found fibbed edges in paths: %s\n"
            log.info(to_log%str(self.toLogRouterNames(path_list)))

            # Fibbed prefix
            # Let's check first if there are other flows for prefix
            allocated_flows = self.getAllocatedFlows(prefix)

            # Check if there are flows to prefix going through some
            # path in path_list. If not, we can delete the
            # lies. Otherwise, we must wait.
            if allocated_flows == []:
                log.info("\t* No allocated flows remain for prefix\n")
                # Obviously, if no flows are found, we can already
                # remove the lies.

                # Set the DAG for the prefix destination to its
                # original version
                path_list_edges = []
                for path in path_list:
                    path_list_edges += zip(path[:-1], path[1:])
            
                # Remove edges from initial paths
                for path in path_list:
                    for node in path:
                        # Set edges to initial situation (fibbed=True,
                        # active=False) and (fibbed=False, active=True)
                        default_edges = self.getDefaultEdges(current_dag, node)
                        current_dag = self.switchDagEdgesData(current_dag, default_edges, active=True)
                        
                        fibbed_edges = self.getFibbedEdges(current_dag, node)
                        current_dag = self.switchDagEdgesData(current_dag, fibbed_edges, active=False)

                # Set current Dag
                self.setCurrentDag(prefix, current_dag)

                # Get the active Dag
                activeDag = self.getActiveDag(prefix)
                
                log.info("\t* removePrefixLies: final DAG\n\t  %s\n"%str(self.toLogDagNames(activeDag).edges()))
                
                # Get the active Dag
                activeDag = self.getActiveDag(prefix)

                # Force it to fibbing
                self.sbmanager.add_dag_requirement(prefix, activeDag.copy())

                # Log it
                log.info("\t* Removed lies for prefix: %s\n"%prefix)
                log.info("\t* LSAs: %s\n"%(str(lsa)))
                
            else:
                log.info("\t* Some flows for prefix still remain ongoing\n")
                canRemoveLSA = True

                # Collect first the edges of the paths to remove
                path_edges_list = []
                for path in path_list:
                    path_edges_list += zip(path[:-1], path[1:])

                log.info("Edges of the paths to remove: %s\n"%self.toLogRouterNames(path_edges_list))
                for (flow, flow_path_list) in allocated_flows:
                    log.info("flow: %s, path: %s\n"%(self.toLogFlowNames(flow), self.toLogRouterNames(flow_path_list)))
                    # Get all edges used by flows sending to same
                    # destination prefix
                    flow_edges_list = []
                    for flow_path in flow_path_list:
                        flow_edges_list += zip(flow_path[:-1], flow_path[1:])

                    check = [True if (u,v) in path_edges_list else False for (u,v) in flow_edges_list]
                    log.info("CHECK list: %s\n"%str(check))
                    if sum(check) > 0:
                        # Do not remove lsas yet. Other flows ongoing
                        # in one of the paths in path_list
                        canRemoveLSA = False
                        break
                        
                if canRemoveLSA == False:
                    # Just log it
                    flows = [f for (f, p) in allocated_flows]
                    to_print = "\t* Lies for prefix %s not removed. Flows yet ongoing:\n"
                    log.info(to_print%prefix)
                    for f in flows:
                        log.info("\t\t%s\n"%(self.toLogFlowNames(f)))
                else:
                    # Set the DAG for the prefix destination to its
                    # original version
                    path_list_edges = []
                    for path in path_list:
                        path_list_edges += zip(path[:-1], path[1:])
            
                    # Remove edges from initial paths
                    for path in path_list:
                        for node in path:
                            # Set edges to initial situation (fibbed=True,
                            # active=False) and (fibbed=False, active=True)
                            default_edges = self.getDefaultEdges(current_dag, node)
                            current_dag = self.switchDagEdgesData(current_dag, default_edges, active=True)
                        
                            fibbed_edges = self.getFibbedEdges(current_dag, node)
                            current_dag = self.switchDagEdgesData(current_dag, fibbed_edges, active=False)

                    self.setCurrentDag(prefix, current_dag)
                
                    # Get the active Dag
                    activeDag = self.getActiveDag(prefix)

                    # Force it to fibbing
                    self.sbmanager.add_dag_requirement(prefix, activeDag.copy())
                
                    # Log it
                    log.info("\t* Removed lies for prefix: %s\n"%prefix)
                    log.info("\t* LSAs: %s\n"%(str(lsa)))

        log.info(lineend)
           
    def getDefaultDijkstraPath(self, network_graph, flow):
        """Returns a list of network nodes representing the default Dijkstra
        path given the flow and a network graph.

        """        
        # We assume here that Flow is well formed, and that the
        # interface addresses of the hosts are given.
        src_name = self.db.getNameFromIP(flow['src'].compressed)
        src_router_name, src_router_id = self.db.getConnectedRouter(src_name)
        dst_network = flow['dst'].network.compressed

        # We take only routers in the route
        route = nx.dijkstra_path(network_graph, src_router_id, dst_network, weight='metric')
        return route

    def getPathLength(self, path):
        """Given a path as a list of traversed routers, it returns the sum of
        the weights of the traversed links along the path.
        """
        edges = [self.initial_graph.get_edge_data(u,v)['metric'] for
                 (u,v) in zip(path[:-1], path[1:])]
        return sum(edges)

    def canAllocateFlow(self, flow, path_list):
        """Returns true if there is at least flow.size bandwidth available in
        all links along the path (or multiple paths in case of ECMP)
        from flow.src to flow.dst.
        """
        for path in path_list:
            if self.getMinCapacity(path) < flow.size:
                return False
        return True

    def getMinCapacity(self, path):
        """Returns the minimum capacity of the edges along the path.
        
        :param path: List of network nodes defining a path [A, B, C, D]"""
        caps_in_path = []
        for (u,v) in zip(path[:-1], path[1:]):
            edge_data = self.initial_graph.get_edge_data(u, v)
            cap = edge_data.get('capacity', None)
            if cap is not None:
                caps_in_path.append(cap)
        try:
            mini = min(caps_in_path)
            return mini
        
        except ValueError:
            t = time.strftime("%H:%M:%S", time.gmtime())
            log.info("%s - getMinCapacity(): ERROR: min could not be calculated\n"%t)
            log.info("\t* Path: %s\n"%path)            
            raise ValueError
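    # e.g. for a path ['A', 'B', 'C'] whose two edges have capacities
    # 80e6 and 20e6 (hypothetical values), getMinCapacity returns 20e6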

    def getMinCapacityEdge(self, path):
        """Returns the edge with the minimum capacity along the path.

        :param path: List of network nodes defining a path [A, B, C, D]
        """
        edges_in_path = [((path[i], path[i+1]),
                          self.initial_graph.get_edge_data(path[i],
                                                           path[i+1])['capacity']) for i in
                         range(len(path)-1) if 'capacity' in
                         self.initial_graph.get_edge_data(path[i],
                                                          path[i+1]).keys()]
        if edges_in_path:
            # Pick the edge whose capacity is smallest
            return min(edges_in_path, key=lambda x: x[1])[0]
        else:
            raise StandardError("%s has no edges!"%str(path))
        
    def getAllocatedFlows(self, prefix):
        """
        Given a prefix, returns a list of tuples:
        [(flow, path), (flow, path), ...]
        """
        if prefix in self.flow_allocation.keys():
            return [(f, p) for f, p in self.flow_allocation[prefix].iteritems()]
        else:
            t = time.strftime("%H:%M:%S", time.gmtime())
            to_print = "%s - getAllocatedFlows(): WARNING: "
            to_print += "prefix %s not yet in flow_allocation table\n"
            #log.info(to_print%(t, prefix))
            return []
        
    def getFlowSizes(self, prefix):
        """Returns the sum of flows with destination prefix, and how many
        flows there are
        """
        allocated_flows = self.getAllocatedFlows(prefix)
        sizes = [f['size'] for (f, p) in allocated_flows]
        return sum(sizes), len(sizes)
    
    def getLiesFromPrefix(self, prefix):
        """Retrieves the LSA of the associated prefix from the southbound
        manager.

        """
        lsa_set = self.sbmanager.advertized_lsa.copy()
        return [lsa for lsa in lsa_set if lsa.dest == prefix]
        
    def getNetworkWithoutFullEdges(self, network_graph, flow_size):
        """Returns a nx.DiGraph representing the network graph without the
        edge that can't allocate a flow of flow_size.
        
        :param network_graph: IGPGraph representing the network.

        :param flow_size: Attribute of a flow defining its size (in bytes).
        """
        ng_temp = network_graph.copy()
        for (x, y, data) in network_graph.edges(data=True):
            cap = data.get('capacity')
            if cap and cap <= flow_size and self.network_graph.is_router(x) and self.network_graph.is_router(y):
                ng_temp.remove_edge(x, y)
        return ng_temp
    
    def getAllPathsRanked(self, igp_graph, start, end, ranked_by='length'):
        """Recursive function that returns an ordered list representing all
        paths between node x and y in network_graph. Paths are ordered
        in increasing length.
        
        :param igp_graph: IGPGraph representing the network
        
        :param start: router if of source's connected router

        :param end: compressed subnet address of the destination
                    prefix."""
        paths = self._getAllPathsLim(igp_graph, start, end, 0)
        if ranked_by == 'length':
            ordered_paths = self._orderByLength(paths)
        elif ranked_by == 'capacity':
            ordered_paths = self._orderByCapacityLeft(paths)
        else:
            raise ValueError("Unknown ranking criterion: %s"%ranked_by)
        return ordered_paths
    
    def _getAllPathsLim(self, igp_graph, start, end, k, path=[], len_path=0):
        """Recursive function that finds all paths from start node to end
        node with maximum length of k (in accumulated 'metric' weight).

        If called with k=0, no length limit is enforced.
        """
        # Accumulate path length first
        if path == []:
            len_path = 0
        else:
            last_node = path[-1]
            len_path += igp_graph.get_edge_data(last_node, start)['metric']

        # Accumulate nodes in path
        path = path + [start]

        if start == end:
            # Arrived at the end. Return the path only if it respects
            # the length limit (k == 0 means no limit)
            if k == 0 or len_path < k+1:
                return [path]
            else:
                return []

        if not start in igp_graph:
            return []

        paths = []
        for node in igp_graph[start]:
            if node not in path: # Omitting loops here
                # Recurse only while under the length limit (k == 0
                # means no limit)
                if k == 0 or len_path < k+1:
                    newpaths = self._getAllPathsLim(igp_graph, node, end, k, path=path, len_path=len_path)
                    for newpath in newpaths:
                        paths.append(newpath)
        return paths

    def _getAllPathsLimDAG(self, dag, start, end, k, path=[]):
        """Recursive function that finds all paths from start node to end node
        with maximum length of k.

        If the function is called with k=0, returns all existing
        loopless paths between start and end nodes.

        :param dag: nx.DiGraph representing the current paths towards
                    a certain destination.

        :param start, end: strings representing the ip addresses of the
                           start and end routers (or nodes) (i.e:
                           10.0.0.3).

        :param k: specified maximum path length (here means hops,
                  since the dags do not have weights).

        """
        # Accumulate nodes in path
        path = path + [start]
        
        if start == end:
            # Arrived to the end. Go back returning everything
            return [path]
            
        if not start in dag:
            return []

        paths = []
        for node in dag[start]:
            if node not in path: # Omitting loops here
                # Recurse only while under the length limit (k == 0
                # means no limit)
                if k == 0 or len(path) < k+1:
                    newpaths = self._getAllPathsLimDAG(dag, node, end, k, path=path)
                    for newpath in newpaths:
                        paths.append(newpath)
        return paths
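    # A minimal sketch of the enumeration above on a toy DAG
    # (hypothetical node names; networkx is imported as nx elsewhere):
    #
    #   dag = nx.DiGraph([('A','B'), ('A','C'), ('B','D'), ('C','D')])
    #   self._getAllPathsLimDAG(dag, 'A', 'D', 0)  # no length limit
    #   -> [['A','B','D'], ['A','C','D']]  (in some order)
    #   self._getAllPathsLimDAG(dag, 'A', 'D', 1)  # at most 1 hop
    #   -> []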

    def _orderByLength(self, paths):
        """Given a list of arbitrary paths, ranks them by length (total
        weight of the traversed edges).

        """
        # Search for path lengths
        ordered_paths = []
        for path in paths:
            pathlen = 0
            for (u,v) in zip(path[:-1], path[1:]):
                if self.network_graph.is_router(v):
                    pathlen += self.network_graph.get_edge_data(u,v)['metric']
            ordered_paths.append((path, pathlen))

        # Now rank them
        ordered_paths = sorted(ordered_paths, key=lambda x: x[1])
        return ordered_paths

    
    def _orderByCapacityLeft(self, paths):
        """Given a list of arbitrary paths, ranks them by the capacity
        left on their edges.

        Function is implemented in TEControllerLab1
        """
        pass
    

    def toLogDagNames(self, dag):
        """
        Returns a copy of the dag with router ips translated into
        router names, for logging purposes
        """
        dag_to_print = nx.DiGraph()
        
        for (u,v, data) in dag.edges(data=True):
            u_temp = self.db.getNameFromIP(u)
            v_temp = self.db.getNameFromIP(v)
            dag_to_print.add_edge(u_temp, v_temp, **data)
        return dag_to_print
    
    def toLogRouterNames(self, path_list):
        """
        Translates a path list, edge list or single path from router
        ips to router names, for logging purposes
        """
        total = []
        if isinstance(path_list[0], list):
            for path in path_list:
                r = [self.db.getNameFromIP(p) for p in path if self.network_graph.is_router(p)] 
                total.append(r)
            return total
        elif isinstance(path_list[0], tuple):
            r = [(self.db.getNameFromIP(u),
                  self.db.getNameFromIP(v)) for (u,v) in path_list
                 if self.network_graph.is_router(u) and self.network_graph.is_router(v)] 
            return r
        else:
            return [self.db.getNameFromIP(p) for p in path_list if self.network_graph.is_router(p)] 

    def toLogFlowNames(self, flow):
        a = "Flow[(%s -> %s): %s, t_o: %s, duration: %s]" 
        return a%(self.db.getNameFromIP(flow.src.compressed),
                  self.db.getNameFromIP(flow.dst.compressed),
                  flow.setSizeToStr(flow.size),
                  flow.setTimeToStr(flow.start_time),
                  flow.setTimeToStr(flow.duration))  


class feedbackThread(threading.Thread):
    """Thread that determines, upon request, on which of the candidate
    paths each flow has actually been allocated, by inspecting the
    routers' traffic capture (.cap) files.
    """
    def __init__(self, requestQueue, responseQueue):
        super(feedbackThread, self).__init__()

        # Create queue attributes
        self.requestQueue = requestQueue
        self.responseQueue = responseQueue

        # Read network database
        self.db = DatabaseHandler()

        # Fill router cap files
        self.capFilesDict = self.pickCapFiles()

        # Data structure that maintains a set of current flows passing
        # through each router in the last second
        self.router_flowsets = {}
        self.updateRouterFlowSets()
            
    def run(self):
        """
        A dictionary of flow -> possible path list is read from the requestQueue.

        A dictionary indexed by flow -> allocated path is returned
        """
        queueLookupPeriod = 2  # seconds
        while True:
            try:
                # Blocking read with timeout
                requestFlowsDict = self.requestQueue.get(timeout=queueLookupPeriod)
            except:
                # Timeout expired with no request: just refresh the
                # per-router flow sets
                self.updateRouterFlowSets()
            else:
                #log.info("*** FEEDBACK REQUEST RECEIVED:\n")
                #log.info("     %s\n"%str(requestFlowsDict))
                self.updateRouterFlowSets()
                responsePathDict = self.dealWithRequest(requestFlowsDict)
                if responsePathDict != {}:
                    self.responseQueue.put(responsePathDict)                    
                

    def updateRouterFlowSets(self):
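        # NOTE: the .cap files stay open between calls, so each
        # readlines() only returns the lines appended since the last
        # read-out (the file offset is preserved).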
        for rid, capfile in self.capFilesDict.iteritems():
            lines = capfile.readlines()
            # Create new empty set
            ridSet = set()
            for line in lines:
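                # Each line is expected to look roughly like this
                # (hypothetical tcpdump-style record):
                #   "... IP 10.0.0.1.5001 > 10.0.0.2.5002: ..."
                # so fields 2 and 4 carry "src_ip.sport" and "dst_ip.dport:".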
                try:
                    # Parse ip's 
                    src_tmp = line.split(' ')[2]
                    src_ip_tmp = src_tmp.split('.')[:4]
                    src_ip = ipaddress.ip_address('.'.join(map(str, src_ip_tmp)))
                    dst_tmp = line.split(' ')[4].strip(':')
                    dst_ip_tmp = dst_tmp.split('.')[:4]
                    dport = dst_tmp.split('.')[4]
                    dst_ip = ipaddress.ip_address('.'.join(map(str, dst_ip_tmp)))
                    ridSet.add(((src_ip, 's'), (dst_ip, 'd'), dport))
                except:
                    # Skip lines that do not parse as flow records
                    pass
                
            # Add set into dictionary
            self.router_flowsets[rid] = ridSet

    def dealWithRequest(self, requestFlowsDict):
        """Given a dictionary flow -> list of candidate paths, determines
        for each flow the path it has actually been allocated to, by
        matching the flow against the per-router flow sets.
        """
        # Results are saved here
        responsePathDict = {}
        
        start_time = time.time()
        for f, pl in requestFlowsDict.iteritems():

            #flowsSet.update({(f.src, f.sport, f.dst, f.dport)})
            # We can't fix the source port of the iperf client, so it
            # would never match. This implies that the same host can't
            # send two simultaneous UDP flows to the same destination
            # host and port.
            flowSet = {((f.src.ip, 's'), (f.dst.ip, 'd'), str(f.dport))}
            
            # Set of routers containing flow
            routers_containing_flow = {self.db.getIpFromHostName(rid) for rid, rset in
                                       self.router_flowsets.iteritems() if
                                       rset.intersection(flowSet) != set()}

            #log.info("*** SEARCHING:\n")
            #log.info("     - %s\n"%f)
            #log.info("     - %s\n"%str(list(routers_containing_flow)))
            
            # Iterate path list and choose which of them is the one in
            # which the flow is allocated
            pathSetList = [(p, set(p)) for p in pl]

            # Retrieve path that matches
            chosen_path = [(p, pset) for (p, pset) in pathSetList if pset == routers_containing_flow]
            if len(chosen_path) == 1:
                responsePathDict[f] = chosen_path[0][0]

            elif len(chosen_path) == 0:
                # Flow not (yet) visible in any candidate path: skip it
                pass

            else:
                log.info("*** FEEDBACK THREAD ERROR: more than one path matched\n")

        return responsePathDict
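
    # Illustrative shape (hypothetical flows/paths) of the exchange: the
    # request maps each flow to its candidate paths, and the response
    # maps it to the single path whose router set matched:
    #
    #   request:  {flow: [[r1, r2], [r1, r3, r2]]}
    #   response: {flow: [r1, r3, r2]}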

    
    def pickCapFiles(self):
        """
        Returns a dictionary indexed by router id -> corresponding .cap file
        """
        return {rid: open(dconf.CAP_Path+rid+'.cap', 'r') for rid in self.db.routers_to_ip.keys()}
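
# Usage sketch (illustrative; assumes the standard Queue module of
# Python 2): the controller side creates both queues, starts the thread
# as a daemon, and posts {flow: [candidate_path, ...]} dictionaries:
#
#   requestQueue, responseQueue = Queue.Queue(), Queue.Queue()
#   ft = feedbackThread(requestQueue, responseQueue)
#   ft.setDaemon(True)
#   ft.start()
#   requestQueue.put({flow: candidate_paths})
#   allocations = responseQueue.get()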


class LinksMonitorThread(threading.Thread):
    """This class defines a thread that is spawned by the TEController
    algorithm in order to periodically update the available capacities
    of the network links.

    It is passed a capacity graph and a lock from its parent, and it
    periodically refreshes the graph's capacity attributes.
    """
    def __init__(self, capacity_graph, lock, logfile, median_filter=False, interval=1.01):
        super(LinksMonitorThread, self).__init__()
        # Read network database
        self.db = DatabaseHandler()

        # Lock object to access capacity graph
        self.lock = lock

        # Counters read interval
        self.interval = interval

        # Capacity graph object
        self.cg = capacity_graph

        # Dictionary that holds the binding between router id and the
        # router ip in the control network
        self.ip_to_control = {}
        
        # Start router counters
        self.counters = self._startCounters()

        # Perform median filter or not?
        self.median_filter = median_filter
        
        # Start router-to-router links
        self.links = self._startLinks()

        # Used internally for the logs
        self.link_to_edge_bindings = self._createLinkToEdgeBindings()

        # Set log file
        if logfile:
            self.logfile = logfile
            # Write first line with links
            with open(self.logfile, 'w') as f:
                f.write(self.printLinkToEdgesLine(self.cg))
        else:
            self.logfile = None

    def _createLinkToEdgeBindings(self):
        """Assigns a numeric index to every edge of the capacity graph
        (used internally for the logs).
        """
        bindings = {}
        for i, (u, v) in enumerate(self.cg.edges()):
            bindings[i] = (u, v)
        return bindings
            
    def run(self):
        while True:
            start_time = time.time()
            
            # Read capacities from SNMP
            self.updateLinksCapacities()

            # Log them in the log file too
            if self.logfile:
                self.logLinksLoads()
            #log.info("It took %.3f to update and log the new capacities readout\n"%(time.time()-start_time))
            
            # Sleep for the time remaining until the next interval
            elapsed_time = time.time() - start_time
            if elapsed_time < self.interval:
                time.sleep(self.interval - elapsed_time)
            
    def updateLinksCapacities(self):
        """Reads the routers' SNMP counters and updates the available
        capacity of every router-to-router link in the capacity graph.
        """
        # Update counters first
        self._updateCounters()
        
        # List in which we hold the already updated interfaces
        interfaces_updated = []
        
        for router, counter in self.counters.iteritems():
            
            # Get router interfaces names
            iface_names = [data['name'] for data in counter.interfaces]
              
            # Get current loads for router interfaces 
            # (difference from last read-out)
            loads = counter.getLoads()

            # Get the time that has elapsed since last read-out
            elapsed_time = counter.timeDiff
            currentThroughputs = loads/float(elapsed_time)
              
            # Retrieve the bandwidths for router-connected links
            bandwidths = []

            for ifacename in iface_names:
                # Get bandwidth for link in that interface
                bw_tmp = [edge_data['bw'] for edge, edge_data in
                          self.links.iteritems() if
                          edge_data['interface'] == ifacename]

                if bw_tmp != []:
                    bandwidths.append(bw_tmp[0])
                else:
                    bandwidths.append(0)

            # Convert as a numpy array
            bandwidths = np.asarray(bandwidths)

            # Calculate available capacities
            availableCaps = bandwidths - currentThroughputs
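            # E.g. (hypothetical numbers and units): if 3 Mbit were moved
            # during a 1.0 s read-out on a 10 Mbps link, the available
            # capacity becomes 10 - 3 = 7 Mbps.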

            # Set link available capacities by interface name
            # (the lock is acquired inside updateLinkCapacity)
            for i, iface_name in enumerate(iface_names):
                if iface_name not in interfaces_updated:
                    iface_availableCap = availableCaps[i]

                    # Check that the interface belongs to a
                    # router-to-router link
                    edge = [edge for edge, data in self.links.iteritems()
                            if data['interface'] == iface_name]
                    if edge:
                        self.updateLinkCapacity(iface_name, iface_availableCap)
                        interfaces_updated.append(iface_name)
                    
    def logLinksLoads(self):
        # Make a copy of the self.cg and release the lock
        with self.lock:
            cg_copy = self.cg.copy()
            
        with open(self.logfile, 'a') as f:
            s = "%s"%time.time()
            to_iterate = sorted(self.link_to_edge_bindings.keys())
            for index in to_iterate:
                (x, y) = self.link_to_edge_bindings[index]
                availableCapacity = cg_copy[x][y]['capacity']
                bandwidth = cg_copy[x][y]['bw']
                usedCapacity = bandwidth - availableCapacity
                load = (usedCapacity / float(bandwidth)) * 100.0
                s += ",(L%d %.3f%%)" % (index, load)
            s += '\n'
            f.write(s)
           
    def _updateCounters(self):
        """Updates all interface counters of the routers in the network.
        Blocks until the counters have been updated.
        """
        for r, counter in self.counters.iteritems():
            #start_time = time.time()
            # Busy-wait until at least one interval has elapsed since
            # the last read-out of this router's counters
            while counter.fromLastLecture() < self.interval:
                pass
            #log.info("I was stuck %.3f seconds waiting for interval to pass\n"%(time.time()-start_time))
            counter.updateCounters32()
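
    # NOTE: the loop above spins (busy-waits) until the interval elapses;
    # a gentler variant (an assumption, not in the original code) would
    # sleep briefly inside the loop, e.g. time.sleep(0.05), trading a
    # little read-out precision for CPU time.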

    def updateLinkCapacity(self, iface_name, new_capacity):
        # Get nodes from the link with such iface_name
        edge = [edge for edge, data in self.links.iteritems() if
                data['interface'] == iface_name]
        if edge:
            (x, y) = edge[0]
        else:
            return
        
        with self.lock:
            if self.median_filter:
                # Perform median filter of window size = 3
                window = self.cg[x][y]['window']

                if len(window) == 3:
                    # Remove the oldest element
                    window.pop()

                # Add new capacity readout to the filter window
                window = [new_capacity] + window

                # Take the median element (middle of the sorted window)
                window_ordered = sorted(window)
                chosen_cap = window_ordered[len(window_ordered)/2]

                # Update edge data
                self.cg[x][y]['window'] = window
                self.cg[x][y]['capacity'] = chosen_cap
        
            else:
                window = self.cg[x][y]['window']

                if len(window) == 3:
                    # Remove the oldest element
                    window.pop()
                    # Ratio of the new capacity w.r.t. the previous one
                    rate = new_capacity/float(window[0])
                    if 1.95 < rate < 2.05:
                        # Double read-out found: keep the previous value
                        new_capacity = window[0]

                # No median filter, but we still store the last 3
                # capacities in the window
                window = [new_capacity] + window
                self.cg[x][y]['window'] = window
                self.cg[x][y]['capacity'] = new_capacity
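
    # Worked example (hypothetical read-outs) of the window-3 median
    # filter above: capacities arrive as 8, 3, 9 -> window becomes
    # [9, 3, 8] -> sorted [3, 8, 9] -> the stored capacity is the
    # median, 8, so the spurious read-out (3) is discarded.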

    def printLinkToEdgesLine(self, capacity_graph):
        s = ""
        to_iterate = sorted(self.link_to_edge_bindings.keys())
        for index in to_iterate:
            (x,y) = self.link_to_edge_bindings[index]
            link_number = index
            x_name = self.db.getNameFromIP(x)
            y_name = self.db.getNameFromIP(y)
            s += ("L%d"%link_number)+'->(%s %s),'%(x_name, y_name)
        s += '\n'
        return s
            
    def _startCounters(self):
        """This function iterates the routers in the network and creates
        a dictionary mapping each router to a SnmpCounter object.
        
        Returns a dict: routerip -> SnmpCounters.
        """
        counters_dict = {}
        with self.lock:
            for r in self.cg.routers:
                # Get control ip for router
                r_control_ip = self.db.getRouterControlIp(r)
                self.ip_to_control[r] = r_control_ip
                if r_control_ip:
                    counters_dict[r] = SnmpCounters(routerIp = r_control_ip)
                else:
                    counters_dict[r] = SnmpCounters(routerIp = r)
        return counters_dict
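
    # E.g. (hypothetical addresses): the result maps each router id to an
    # SnmpCounters instance, polled over the control network whenever a
    # control ip exists:
    #
    #   {'10.0.0.1': SnmpCounters(routerIp='192.168.1.1'), ...}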
    
    def _startLinks(self):
        """
        Only router-to-router links are of interest (since we can't modify
        routes inside subnetworks).
        """
        return self.db.getAllRouterEdges()
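
# Usage sketch (illustrative; `cg` stands for the capacity graph built by
# the parent TEController, shared through `lock`):
#
#   lock = threading.Lock()
#   lmt = LinksMonitorThread(capacity_graph=cg, lock=lock,
#                            logfile=dconf.LinksMonitor_LogFile,
#                            median_filter=True)
#   lmt.setDaemon(True)
#   lmt.start()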


class TrafficGenerator(Base):
    """Object that creates a Traffic Generator in the network.
    """

    def __init__(self, *args, **kwargs):
        super(TrafficGenerator, self).__init__(*args, **kwargs)

        self.scheduler = sched.scheduler(time.time, time.sleep)
        self.db = DatabaseHandler()
        self.thread_handlers = []

        # IP of the Load Balancer Controller host.
        try:
            self._lbc_ip = ipaddress.ip_interface(self.db.getIpFromHostName(dconf.LBC_Hostname)).ip.compressed
        except:
            log.info("WARNING: Load balancer controller could not be found in the network\n")
            self._lbc_ip = None

    def _signal_handler(self, signal, frame):
        """
        Terminates trafficgenerator thread gracefully.
        """
        log.info("Signal caught... shuting down!\n")

        # collect all open _createFlow threads
        for t in self.thread_handlers:
            # t.join()
            log.info("_createFlow thread terminated\n")

        # exit
        sys.exit(0)

    def informLBController(self, flow):
        """Deals with the JSON interface: informs the LBController that a
        new flow has been created in the network.
        """
        url = "http://%s:%s/newflowstarted" % (self._lbc_ip, dconf.LBC_JsonPort)
        log.info("\t Informing LBController\n")
        log.info("\t   * Flow: %s\n" % self.toLogFlowNames(flow))
        log.info("\t   * Url: %s\n" % url)
        try:
            requests.post(url, json=flow.toJSON())
        except Exception:
            log.info("ERROR: LBC could not be informed!\n")
            log.info("LOG: Exception in user code:\n")
            log.info("-" * 60 + "\n")
            log.info(traceback.format_exc())
            log.info("-" * 60 + "\n")

    def toLogFlowNames(self, flow):
        a = "(%s -> %s): %s, t_o: %s, duration: %s"
        return a % (
            self.db.getNameFromIP(flow.src.compressed),
            self.db.getNameFromIP(flow.dst.compressed),
            flow.setSizeToStr(flow.size),
            flow.setTimeToStr(flow.start_time),
            flow.setTimeToStr(flow.duration),
        )

    def createFlow(self, flow):
        """Calls _createFlow in a different Thread (for efficiency)
        """
        # Start thread that will send the Flask request.
        # Note: Thread.start() returns None, so the thread object must be
        # kept before starting it.
        t = Thread(target=self._createFlow, name="_createFlow", args=(flow,))
        t.start()
        # Append the thread handler to the list
        self.thread_handlers.append(t)

    def _createFlow(self, flow):
        """Creates the corresponding iperf command to actually install the
        given flow in the network.  This function has to call
        self.informLBController!
        """
        # Sleep until it is time to start
        time.sleep(flow["start_time"])

        # Call to informLBController if it is active
        if self._lbc_ip:
            self.informLBController(flow)
            # time.sleep(0.2)

        # Create a new flow with host IPs instead of interface addresses,
        # since iperf only understands IPs
        flow2 = Flow(
            src=flow["src"].ip.compressed,
            dst=flow["dst"].ip.compressed,
            sport=flow["sport"],
            dport=flow["dport"],
            size=flow["size"],
            start_time=flow["start_time"],
            duration=flow["duration"],
        )

        url = "http://%s:%s/startflow" % (flow2["src"], dconf.Hosts_JsonPort)

        t = time.strftime("%H:%M:%S", time.gmtime())
        log.info("%s - Starting Flow\n" % t)
        log.info("\t Sending request to host %s\n" % str(flow["src"]))
        log.info("\t   * Flow: %s\n" % self.toLogFlowNames(flow))
        log.info("\t   * Url: %s\n" % url)

        # Send request to host to start new iperf client session
        try:
            requests.post(url, json=flow2.toJSON())
        except Exception:
            log.info("ERROR: Request could not be sent to Host!\n")
            log.info("LOG: Exception in user code:\n")
            log.info("-" * 60 + "\n")
            log.info(traceback.format_exc())
            log.info("-" * 60 + "\n")

    def stopFlow(self, flow):
        """Instructs host to stop iperf client session (flow).

        """
        flow2 = Flow(
            src=flow["src"].ip.compressed,
            dst=flow["dst"].ip.compressed,
            sport=flow["sport"],
            dport=flow["dport"],
            size=flow["size"],
            start_time=flow["start_time"],
            duration=flow["duration"],
        )

        url = "http://%s:%s/stopflow" % (flow2["src"], dconf.Hosts_JsonPort)

        t = time.strftime("%H:%M:%S", time.gmtime())
        log.info("%s - Stopping Flow\n" % t)
        log.info("\t Sending request to host to stop flow %s\n" % str(flow["src"]))
        log.info("\t   * Flow: %s\n" % self.toLogFlowNames(flow))
        log.info("\t   * Url: %s\n" % url)

        # Send request to host to stop the iperf client session
        try:
            requests.post(url, json=flow2.toJSON())
        except Exception:
            log.info("ERROR: Stop flow request could not be sent to Host!\n")

    def createRandomFlow(self):
        """Creates a random flow in the network
        """
        pass

    def scheduleRandomFlows(self, ex_time=60, max_size="40M"):
        """Creates a random schedule of random flows in the network. This will
        be useful later to evaluate the performance of the
        LBController.
        """
        pass

    def scheduleFileFlows(self, flowfile):
        """Schedules the flows specified in the flowfile
        """
        f = open(flowfile, "r")
        flows = f.readlines()
        if flows:
            for flowline in flows:
                flowline = flowline.replace(" ", "").replace("\n", "")
                if flowline != "" and flowline[0] != "#":
                    try:
                        [s, d, sp, dp, size, s_t, dur] = flowline.strip("\n").split(",")
                        # Get hosts IPs
                        src_iface = self.db.getIpFromHostName(s)
                        dst_iface = self.db.getIpFromHostName(d)

                    except Exception:
                        log.info("ERROR: could not parse flow line: %s\n" % flowline)
                        src_iface = None
                        dst_iface = None

                    if src_iface is not None and dst_iface is not None:
                        flow = Flow(
                            src=src_iface, dst=dst_iface, sport=sp, dport=dp, size=size, start_time=s_t, duration=dur
                        )
                        # Schedule flow creation
                        self.scheduler.enter(0, 1, self.createFlow, (flow,))
                    else:
                        log.info("ERROR! Hosts %s and/or %s do not exist in the network!\n" % (s, d))

            # Make the scheduler run after file has been parsed
            self.scheduler.run()
        else:
            log.info("\t No flows to schedule in file\n")