Example #1
 def runPSDecay(self):
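     # SimPy process: every PSALGO_TDECAY seconds, run the pheromone decay cycle on
     # every node (only after the algorithm start time has passed).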
     Debug.PPrint("%f"%self.env.now + "," + self.label + "," + "runPSDecay::," + "Enter", DebugCat.DEBUG_CAT_PSALGO)
     while True:
         Debug.PPrint("%f"%self.env.now + "," + self.label + "," + "runPSDecay::," + "new cycle", DebugCat.DEBUG_CAT_PSALGO)
         self.track_decaycycles.append(self.env.now)
         if(self.env.now > self.psalgo_start_time): 
             # run the decay cycle for every node
             for each_node in self.RMInstance.node_network.get_Nodes():
                 each_node.psalgoprops.cycleDecay()
         
         # wait for TDECAY seconds
         yield self.env.timeout(SimParams.PSALGO_TDECAY)
Example #2
    def _ACQueryRequestHelper(self, video_specs, sample_task):
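        # Temporarily admit the new stream, run the admission-control test, and roll
        # back (remove stream, disable dispatch, record rejection) if it fails.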
        # tell RM to add the new stream to its runtime app model - temporary admission
        gop_tasks = self._getVideoStreamGoPTasks(
            self.env.now, video_specs["wf_id"],
            sample_task.get_video_stream_id())
        self.RM_instance.RuntimeApp_addStream(video_specs["wf_id"], gop_tasks,
                                              self.env.now)

        # perform the admission controller check
        result = self.RM_instance.StreamAdmission_NewVideo_ET(video_specs)

        if (result == False):  # admission rejected - disable dispatch of all tasks in this video
            # remove previously added stream - reverse admission
            self.RM_instance.RuntimeApp_removeStream(
                video_specs["wf_id"], video_specs["vid_strm_id"])
            # disable dispatch of all tasks in video stream
            self._disableAllTaskDispatchInVS(video_specs["wf_id"],
                                             sample_task.get_video_stream_id())
            # record rejection
            self.rejectedVideoStreams.append(video_specs)

            Debug.PPrint(
                "%f" % self.env.now + "," + self.label + "," +
                '_ACQueryRequestHelper::, : vid_rejected :' +
                str(video_specs["wf_id"]) + " - " +
                str(sample_task.get_video_stream_id()),
                DebugCat.DEBUG_CAT_TDINFO)

            return False
        else:
            return True
    def MSSignallingFlowComplete(self, flow, type):
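        # Handle a completed master-slave signalling flow: for a TASKCOMPLETE message, mark the
        # task as completed and update the volatile mapping and completion-tracking tables.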

        Debug.PPrint(
            "%f" % self.env.now + "," + self.label + "," +
            'MSSignallingFlowComplete::, : Enter - flowid=' +
            str(flow.get_id()), DebugCat.DEBUG_CAT_MAPPERINFO)

        if (type == FlowType.FLOWTYPE_MASTERSLAVESIGNALLING_TASKCOMPLETE):
            payload_metadata = flow.get_payload_metadata()
            finished_task_id = payload_metadata['finished_task_id']
            node_id = payload_metadata['node_id']
            strm_info_wfid = payload_metadata['finished_task_strm_info'][0]
            strm_info_vidid = payload_metadata['finished_task_strm_info'][1]
            finished_task_cc = payload_metadata['finished_task_info'][0]
            finished_task_ftype = payload_metadata['finished_task_info'][1]
            finished_task_pri = payload_metadata['finished_task_info'][2]

            if (finished_task_id in self.RM_instance.task_mapping_table):
                self.RM_instance.task_mapping_table[finished_task_id][
                    'status'] = TaskStatus.TASK_COMPLETED

            # update the volatile task-mapping table
            self.volatileTaskMappingTable_RemoveSpecificTask(
                finished_task_id, node_id)

            # update rm task complete cc tracking table
            self._update_task_completed_info(strm_info_wfid, strm_info_vidid,
                                             finished_task_ftype,
                                             finished_task_cc)

        else:
            sys.exit(self.label + ":: MSSignallingFlowComplete : error")

        return []
    def loanRelease_sendMsg(self, rej_cm_list, loan_req_count):
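        # CCP: ask the rejected cluster managers to release the slave nodes they reserved
        # for this loan request.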
        if (SimParams.CCP_ENABLE == True):
            Debug.PPrint("%f"%self.env.now + "," + self.label + "," +'loanRelease_sendMsg::, :'  + \
                         ", cm_id="+str(self.node_instance.get_id()) + \
                         ", loan_req_count="+str(loan_req_count), DebugCat.DEBUG_CAT_CCPROTOCOL)

        #pprint.pprint(rej_cm_list)
        for each_item in rej_cm_list:
            payload_metadata = {
                'loan_req_count': loan_req_count,
                'slave_node_id': each_item[1],
                'slave_selected': True,
                'cluster_id': self.get_cluster_id(),
                'cluster_manager_id': self.node_instance.get_id(),
            }

            self._sendMsgFlow(
                finished_task=None,
                src_node_id=self.node_instance.get_id(),
                dst_node_id=each_item[0],  # manager
                payload=SimParams.CCP_BASIC_MSG_FLOWPAYLOADSIZE,
                payload_metadata=payload_metadata,
                type=FlowType.FLOWTYPE_CCP_LOANRELEASE,
                update_flw_table=False)

        self.RMInstance.flow_table.updateTable(fire=True)
    def DataFlowComplete(self, flow):
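        # A data flow has completed: interrupt the destination node (after a small delay) and
        # notify the RM via another flow so that it can update its global view.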

        Debug.PPrint(
            "%f" % self.env.now + "," + self.label + "," +
            'DataFlowComplete::, : Enter - flowid=' + str(flow.get_id()),
            DebugCat.DEBUG_CAT_MAPPERINFO)

        # set the status of the task in dep buffer to transmission complete
        dst_node_id = flow.get_destination()
        dst_node = self.RM_instance.node_network.get_Node(dst_node_id)

        # notify the destination node that the data has finished transmitting
        #dst_node.processInstance.interrupt('FLWCOMPLETE_'+str(flow.get_id()))
        self.env.process(
            self._interruptNodeAfterDelay(dst_node,
                                          flow.get_id(),
                                          message='FLWCOMPLETE_' +
                                          str(flow.get_id())))

        # notify RM regarding data flow completion; the RM needs to update its global view
        self.env.process(
            self.
            _notifyRMRegardingDataFlowCompletion_viaAnotherFlow_AfterDelay(
                flow))

        return []
Example #6
 def broadcastToNeighbours(self, dose):
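     # Send the hormone dose to every neighbouring node as a PS flow, skipping nodes already
     # visited when hormone propagation limiting is enabled; returns the ids of the added flows.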
     Debug.PPrint("%f"%self.env.now + "," + self.label + "," + "broadcastToNeighbours::," + "Enter", DebugCat.DEBUG_CAT_PSALGO)       
     #pprint.pprint(dose)
     
     neighbours = self.node.resource_manager_instance.interconnect.getNeighbours(self.node.get_id())
     
     if(SimParams.PSALGO_LIMIT_HORMONE_PROPAGATION == True):
         nodes_in_hormone_traversal_list = dose["node_traversal"]
     else:
         nodes_in_hormone_traversal_list = []
     
     num_flows_added = []        
     for neighbour_direction, each_neighbour_node_id in neighbours.iteritems():
         if(each_neighbour_node_id != None):
             if each_neighbour_node_id not in nodes_in_hormone_traversal_list:
                 each_neighbour = self.node.resource_manager_instance.node_network.get_Nodes()[each_neighbour_node_id]
                 #each_neighbour.psalgoprops.cyclePSPropagation(dose)
                 
                 #if(each_neighbour.psalgoprops.get_node_type() != PSAlgorithmNodetype.QUEEN_BEE):
                 # construct and add signal communication to flow table
                 (newpsflow, bl) = self.constructPSFlow(dose, each_neighbour.get_id())
                 # add to the flow table
                 self.node.resource_manager_instance.flow_table.addFlow(newpsflow, self.env.now, bl)                        
                 num_flows_added.append(newpsflow.get_id())
     
     return num_flows_added
    def sendCompletedTaskToOtherNodes(self, task):
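        # Distribute a finished task to the nodes hosting its dependent tasks; children mapped
        # on this node go straight into the local dependency buffer, the rest via RM flows.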

        Debug.PPrint(
            "%.15f" % self.env.now + "," + self.label + "," +
            'sendCompletedTaskToOtherNodes_ET::, Enter (taskid=%d)' %
            (task.get_id()), DebugCat.DEBUG_CAT_CPUINFO)

        # need to maintain temp_distribution list so that we don't duplicate any transmissions
        # e.g. P, B mapped to core-B, I mapped to core-A, so A sends task *twice* to core-B - WRONG !!
        temp_distribution_list = []

        # which tasks needs this task ?
        dependent_tasks_list = task.get_which_tasks_needs_me()

        ## check if all of the dep tasks have a mapped core, else raise error
        for each_dep_t in dependent_tasks_list:
            node_id = self.getTaskMappedNode(each_dep_t)
            if (node_id == None):
                print(self.label +
                      ' sendCompletedTaskToOtherNodes : each_task_id -' +
                      str(each_dep_t) + ' not yet mapped!, curr_task=' +
                      str(task.get_id()))
                sys.exit()
            else:

                # send to node, but before sending we need to check if
                # we have already sent it before..
                if (self.node_network.get_Nodes()
                    [node_id].dependencyBuff_IsTaskInBuff(
                        task.get_id()) == False):

                    # check if child is mapped on same node
                    # if true, put to self nodes dep buffer and mark dep check list in RMTM table
                    if (node_id == self.get_id()):
                        task.set_status(
                            TaskStatus.TASK_DATA_TRANSMISSION_COMPLETE)
                        result = self.dependencyBuff_put(task)
                        if (result == False):
                            print(self.label +
                                  ' sendCompletedTaskToOtherNodes_ET : node-' +
                                  str(self.get_id()) +
                                  ", --- dep_buff is FULL ! at " +
                                  str(self.env.now))
                            sys.exit()
                        self._markDepsCompleted_InRMTMTbl(
                            task.get_id(), each_dep_t)

        self.lock_RM_FLWtbl()
        self.lock_RM_TMtbl()

        self.resource_manager_instance.Mapper.addTo_RM_FlowTable([task],
                                                                 self.env.now,
                                                                 self.get_id())

        #self.resource_manager_instance.mutex_tmtbl.get(1)      # release lock
        #self.resource_manager_instance.flow_table.mutex.get(1)   # release lock

        self.release_RM_FLWtbl()
        self.release_RM_TMtbl()
    def notifyRM_droppedTasks(self, wf_id):
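        # Collect dispatchable tasks of this workflow that cannot be admitted: drop single tasks,
        # or the whole video stream if its first GoP is affected, and notify the RM accordingly.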
        
        #Debug.PPrint("%f"%self.env.now + "," + self.label + "," + 'notifyRM_droppedTasks::, : Enter', DebugCat.DEBUG_CAT_TDDROPPEDTASKS)
        
        dropped_task_list = []
        
        dropping_whole_video = False
        
        for task in self.multiple_workflows[wf_id].get_stream_content():
            if(self._canTaskBeDispatched(task) == True):    # task can be dispatched, but input buffers or TQs are full
                #print "here -- 1"
                if(task.get_parentGopId() == 0): # if dropping the start gop of a video stream, then drop the full video stream
                    
                    Debug.PPrint("%f"%self.env.now + "," + self.label + "," + 'notifyRM_droppedTasks::, : first gop of video stream skipped :' + str(wf_id) + " - " + str(task.get_video_stream_id()), DebugCat.DEBUG_CAT_TDDROPPEDTASKS)
                    
                    # disable dispatch of all tasks in video stream
                    full_video_tasks = self._disableAllTaskDispatchInVS(wf_id, task.get_video_stream_id())                    
                    dropped_task_list.extend(full_video_tasks)
                    
                    dropping_whole_video = True
                    
                    # record rejection
                    self.rejectedVideoStreams.append(self._getVSInfoFromTask(task))
                else:
                    Debug.PPrint("%f"%self.env.now + "," + self.label + "," + 'notifyRM_droppedTasks::, : dropping %d tasks' % (len(dropped_task_list)) , DebugCat.DEBUG_CAT_TDDROPPEDTASKS)
                    dropped_task_list.append(task)
#            else:
#                print "---"
#                print task.get_id()
#                print task.get_scheduledDispatchTime()
#                print task.get_dispatchDisabled()
#                print self.env.now
#                print "---"
                
        if(len(dropped_task_list) > 0):
            
            # dropping a normal job(s)
            if(dropping_whole_video == False):
                Debug.PPrint("%f"%self.env.now + "," + self.label + "," + 'notifyRM_droppedTasks::, : dropping %d tasks' % (len(dropped_task_list)) , DebugCat.DEBUG_CAT_TDDROPPEDTASKS)
                self.RM_instance.addDroppedTask(dropped_task_list, wf_id)
            
            # is this part of the last gop of the video stream ? if so signal RM to remove video stream from runtime app
            if(dropped_task_list[0].get_isTailVideoGop() == True):
                vs_id = dropped_task_list[0].get_video_stream_id()                
                Debug.PPrint("%f"%self.env.now + "," + self.label + "," + 'notifyRM_droppedTasks::, : last gop of video stream skipped :' + str(wf_id) + " - " + str(vs_id), DebugCat.DEBUG_CAT_TDDROPPEDTASKS)
                self.RM_instance.RuntimeApp_removeStream(wf_id, vs_id)
    def loanRelease_receiveMsg(self, flow):
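        # CCP: a loan-release message arrived - un-reserve the named slave node.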
        if (SimParams.CCP_ENABLE == True):
            src_id = flow.get_source()
            loan_req_count = flow.get_payload_metadata()['loan_req_count']
            Debug.PPrint("%f"%self.env.now + "," + self.label + "," +'loanRelease_receiveMsg::, :'  + \
                         ", cm_id="+str(self.node_instance.get_id()) + \
                         ", src_id="+str(src_id) + \
                         ", loan_req_count=" + str(loan_req_count), DebugCat.DEBUG_CAT_CCPROTOCOL)

            release_slave_id = flow.get_payload_metadata()['slave_node_id']

            if release_slave_id in self.reserved_slave_node_ids:
                self.reserved_slave_node_ids.remove(release_slave_id)

            ## debug
            if len(self.reserved_slave_node_ids) == 0:
                Debug.PPrint("%f"%self.env.now + "," + self.label + "," +'loanRelease_receiveMsg::, :'  + \
                         ", cm_id="+str(self.node_instance.get_id()) + \
                         ":: slave reserve list is now empty !!", DebugCat.DEBUG_CAT_CCPROTOCOL)
Example #10
 def runPSDifferentiation(self): 
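     # SimPy process: every PSALGO_TQN seconds, run the queen/worker differentiation cycle on every
     # node; before the algorithm start time, perform the initial queen selection instead.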
     Debug.PPrint("%f"%self.env.now + "," + self.label + "," + "runPSDifferentiation::," + "Enter", DebugCat.DEBUG_CAT_PSALGO)       
     while True:
         Debug.PPrint("%f"%self.env.now + "," + self.label + "," + "runPSDifferentiation::," + "new cycle", DebugCat.DEBUG_CAT_PSALGO)
         self.track_propcycles.append(self.env.now)
         if(self.env.now > self.psalgo_start_time):            
             # run the ps propagation for every node
             for each_node in self.RMInstance.node_network.get_Nodes():
                 each_node.psalgoprops.cycleDifferentiation()
         
             # one update for all of the above
             self.RMInstance.flow_table.updateTable(fire=True)
         
         else:
             # before the algorithm start time: perform the initial queen selection and hormone propagation
             self.initialQNSelectionandHormonePropagation()
             
         # wait for TQN seconds
         yield self.env.timeout(SimParams.PSALGO_TQN)            
Example #11
    def removeFlow(self, flow_tbl_entry):
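        # Remove a completed flow from the table: record its actual latency, drop it from the
        # interference sets of the remaining flows, signal completion and update link utilisation.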

        Debug.PPrint(
            "%.15f" % self.env.now + "," + self.label + "," +
            'removeFlow::, flow_id=[%d], ftype=%d ' %
            (flow_tbl_entry.getFlow().get_id(),
             flow_tbl_entry.getFlow().get_type()),
            DebugCat.DEBUG_CAT_NOCFLWTBLINFO)

        temp_flow_storage = None
        temp_flow_storage = flow_tbl_entry.getFlow()

        flw_id = flow_tbl_entry.getFlow().get_id()
        if (flw_id in self.flowEntries):

            # what is this flow's actual latency? (should be lower than the WCET!)
            actual_latency = self.env.now - self.flowEntries[
                flw_id].getReleaseTime()
            self.flowEntries[flw_id].getFlow().set_actualLatency(
                actual_latency)

            ## debug ##
            if (SimParams.LOG_OUTPUT_NOCFLOWINFO == True):
                self._writeToLog('flowinfo.txt',
                                 self.flowEntries[flw_id].getFlow().toString())
            ###########

            self.flowEntries[flw_id].setRemovalTime(self.env.now)
            del self.flowEntries[flw_id]

            # track
            self.track_completedFlow(flow_tbl_entry)

            # update the interference of the other flows
            for fte_k, fte in self.flowEntries.iteritems():
                self.flowEntries[fte_k].removeInterferenceSource(
                    flow_tbl_entry)

            # notify completion of the flow
            new_released_tasks = self.handlerFlowComplete(temp_flow_storage)

            # update link utilisation
            if SimParams.TRACK_NOCLINKUTIL == True:
                flw_links = flow_tbl_entry.getFlow().get_route()
                for each_link in flw_links:
                    each_link.removeFlow(flow_tbl_entry.getFlow())

            return new_released_tasks

        else:
            sys.exit(
                self.label +
                "::removeFlow: Error, flow not found, flw_id=%d, ftype=%d!" %
                (flw_id, flow_tbl_entry.getFlow().get_type()))
Example #12
    def startMMCTONodeDataTransfer(self, task, node, ibuff_id):
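        # Build an MMC-to-node data-read flow for the task and add it to the RM flow table.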

        #src_node_id = self.getClosestMMCPort(node.get_id())
        src_node_id = self.getMMCPort_handler(task.get_id(), node.get_id())
        dst_node_id = node.get_id()

        Debug.PPrint(
            "%f" % self.env.now + "," + self.label + "," +
            'startMMCTONodeDataTransfer:: before GOP execution, : task=%s, %s->%s'
            % (str(task.get_id()), str(src_node_id), str(dst_node_id)),
            DebugCat.DEBUG_CAT_MMCNODEDATATRANSFER)

        release_time = self.env.now
        nextid = self.RMInstance.flow_table.nextid
        route = self.RMInstance.interconnect.getRouteXY(
            src_node_id, dst_node_id)
        priority = SimParams.MMC_DATAREAD_FLOW_PRIORITY + nextid
        payload = task.get_mpeg_tasksize()
        basic_latency = self.RMInstance.interconnect.getRouteCostXY(
            src_node_id, dst_node_id, payload)

        endTime_wrt_BL = release_time + basic_latency
        payload_metadata = {
            'target_task': task,
            'mapped_node': node,
            'ibuff_id': ibuff_id
        }

        newflow = NoCFlow(
            nextid,
            None,
            None,
            None,  # list of dst task ids
            None,  # list of dst task ixs
            src_node_id,
            dst_node_id,
            route,
            priority,
            None,
            basic_latency,
            payload,
            endTime_wrt_BL,
            type=FlowType.FLOWTYPE_MMCTONODE_DATATRANSFER_RD,
            payload_metadata=payload_metadata)

        self.mmc_to_node_data_transfer_started_task_ids.append(
            task.get_id())  # temporarily record

        self.RMInstance.lock_RM_FLWtbl()
        # add to the flow table
        self.RMInstance.flow_table.addFlow(newflow, release_time,
                                           basic_latency)
        self.RMInstance.release_RM_FLWtbl()
Example #13
    def addFlow(self, flow, releaseTime, timeRemaining):
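        # Insert a new flow into the flow table, recording mutual interference with every existing
        # flow whose route intersects (priority decides which flow interferes with which).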

        Debug.PPrint(
            "%.15f" % self.env.now + "," + self.label + "," +
            'addFlow::, flow_id=[%d], ftype=%d ' %
            (flow.get_id(), flow.get_type()), DebugCat.DEBUG_CAT_NOCFLWTBLINFO)
        #print flow

        # create a flow table entry
        newFlowEntry = NoCFlowTableEntry(self.env, flow, releaseTime)
        newFlow = newFlowEntry.getFlow()

        # set remaining time
        newFlowEntry.setRemainingTime(timeRemaining)

        if (self._trunc(timeRemaining, 10) == self._trunc(releaseTime, 10)):
            self.track_num_flows_too_short += 1

        # check interfering flows
        for fte_k, fte in self.flowEntries.iteritems():
            existingFlowEntry = fte
            existingFlow = fte.getFlow()
            intersects = self.getIntersectingLinks(newFlow, existingFlow)

            if (len(intersects) > 0):  # has interference
                if (existingFlow.get_priority() <=
                        newFlow.get_priority()):  # existing flow has higher (or equal) priority
                    newFlowEntry.addInterferenceSource(existingFlowEntry)
                    newFlowEntry.getFlow().addActualInterferer(
                        existingFlowEntry.getFlow().get_id())
                else:
                    self.flowEntries[fte_k].addInterferenceSource(newFlowEntry)
                    self.flowEntries[fte_k].getFlow().addActualInterferer(
                        newFlowEntry.getFlow().get_id())
            else:
                # no interference
                pass

        # add to the flow table
        if (flow.get_id() not in self.flowEntries):
            self.flowEntries[flow.get_id()] = newFlowEntry
            self.nextid += 1
            self.track_addedFlows(newFlowEntry)
        else:
            sys.exit(self.label + "::addFlow: Error!")

        # update link utilisation
        if SimParams.TRACK_NOCLINKUTIL == True:
            flw_links = flow.get_route()
            for each_link in flw_links:
                each_link.addFlow(flow)
Example #14
    def interruptRMAfterDelay(self, when_to_interrupt, finished_flw_ids):
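        # SimPy process: wait until 'when_to_interrupt' and, if the RM is sleeping, interrupt it
        # with the list of finished flow ids.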

        delay = when_to_interrupt - self.env.now

        if (delay > 0):
            yield self.env.timeout(delay)  # delay

            if (self.RMInstance.status == NoCFLWTBL_RMStatus.RM_SLEEPING):
                Debug.PPrint(
                    "%.15f" % self.env.now + "," + self.label + "," +
                    'interruptRMAfterDelay::, interrupting RM (finished_flw_id='
                    + str(finished_flw_ids), DebugCat.DEBUG_CAT_INTERRUPT)
                self.RMInstance.processInstance.interrupt(
                    "NOCFLOW-" + str(finished_flw_ids))
Example #15
 def cycleDifferentiation(self):        
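     # One differentiation cycle: adjust the queen threshold (dynamically from the system slack, or
     # keep it static), then become a queen and broadcast a hormone dose if the pheromone level is
     # below the threshold, otherwise become a worker.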
     Debug.PPrint("%f"%self.env.now + "," + self.label + "," + "cycleDifferentiation::," + "Enter", DebugCat.DEBUG_CAT_PSALGO)
     self.clearNearestQueenList()
             
     # establish threshold - either dynamic or static
     if(SimParams.PSALGO_DYNAMIC_THRESHOLDQN_ENABLED == True):   # dynamic         
         #normalised_cum_slack = self.node.calculate_SystemSlack_viaTQ()
         normalised_cum_slack = self.node.calculate_SystemSlack_wrt_est_relD(normalised=True)
                 
         if(normalised_cum_slack>0): # increase
             # if normalised cumulative slack is larger than zero we increase the threshold
             self.threshold_qn = float(self.threshold_qn) * float(1.0+float(normalised_cum_slack*SimParams.PSALGO_DYNAMIC_THRESHOLDQN_RATIO[0]))
             
             # in case ph level is zero
             if self.pheramone_level == 0:
                 self.pheramone_level = 0.000001
                 
             if (float(self.threshold_qn) / float(self.pheramone_level)) > 10.0:
                 self.threshold_qn = float(self.pheramone_level) * 10.0
                     
         else: # decrease
             self.threshold_qn = self.pheramone_level * SimParams.PSALGO_DYNAMIC_THRESHOLDQN_RATIO[1] # definitely do not become a queen
         
     else:   # static
         pass    # threshold stays unchanged
    
     # differentiate if queen/worker
     if (self.pheramone_level < self.threshold_qn):
         self.node_type = PSAlgorithmNodetype.QUEEN_BEE
         self.track_qn_count += 1
         # set hormone level for a queen
         #self.pheramone_level = SimParams.PSALGO_INITIALHORMONEAMNT_QUEEN           
         # broadcast
         dose = {
                 "qn_hop_distance" : 0, 
                 "p_dose" : SimParams.PSALGO_HQN, 
                 "qn_pos" : self.node.get_id(),
                 "qn_tq_info" : [t for t in self.node.get_SystemSlack_MappedTasks()],
                 "node_traversal" : [self.node.get_id()] 
                 }
         self.broadcastToNeighbours(dose)
     else:
         self.node_type = PSAlgorithmNodetype.WORKER_BEE
     
     self.track_cycleDiff.append(
                                 {
                                  'time' : self.env.now,
                                  'latenessinfo' : self.node.min_max_avg_TaskLatenessinTQ_KG() 
                                  }
                                 )
Example #16
 def setInitialQueenNodes(self):
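     # Mark the (hard-coded) initial queen nodes and give them the initial queen hormone level.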
     Debug.PPrint("%f"%self.env.now + "," + self.label + "," + "setInitialQueenNodes::," + "Enter", DebugCat.DEBUG_CAT_PSALGO)
     
     if(SimParams.PSALGO_INITQNCOUNT > SimParams.NUM_NODES):
         sys.exit("setInitialQueenNodes:: Error")
     else:
         #random_node_ids = np.random.randint(0, high=len(self.RMInstance.node_network.get_Nodes())-1, size=SimParams.PSALGO_INITQNCOUNT)
         random_qn_ids = SimParams.PSALGO_HARDCODE_QN_LOCATIONS            
         for each_node_id in random_qn_ids:
             each_node = self.RMInstance.node_network.get_Nodes()[each_node_id]                
             each_node.psalgoprops.node_type = PSAlgorithmNodetype.QUEEN_BEE
             
             # what type of hormone to use ? fixed/dynamic (e.g. based on slacks ?)                
             each_node.psalgoprops.pheramone_level = SimParams.PSALGO_INITIALHORMONEAMNT_QUEEN
    def loanRequest_receivedMsg(self, flow):
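        # CCP: a remote cluster manager requests a loan - try to find a suitable slave node in this
        # cluster, reserve it, and reply with either the selected slave or SLAVE_NOT_FOUND.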
        if (SimParams.CCP_ENABLE == True):
            src_id = flow.get_source()
            old_node_id = flow.get_payload_metadata()['slave_node_id']
            late_task = flow.get_payload_metadata()['late_task']
            loan_req_count = flow.get_payload_metadata()['loan_req_count']
            old_node_util = flow.get_payload_metadata()['current_util']

            Debug.PPrint("%f"%self.env.now + "," + self.label + "," +'loanRequest_receivedMsg::, :'  + \
                         ", cm_id="+str(self.node_instance.get_id()) + ", src_id="+str(src_id) + \
                          ", loan_req_count="+str(loan_req_count), DebugCat.DEBUG_CAT_CCPROTOCOL)

            # redirect message to remapping handler
            selected_node_id = self.node_instance.taskremapping_decent_scheme_instance.taskReMappingDecentSchImpl_CCP_LoanReq_v2(
                self.node_instance.get_id(), flow,
                self.reserved_slave_node_ids, old_node_util)

            if (selected_node_id != None):
                Debug.PPrint("%f"%self.env.now + "," + self.label + "," +'loanRequest_receivedMsg::, :'  + \
                         ", cm_id="+str(self.node_instance.get_id()) + ", src_id="+str(src_id) + \
                          ", loan_req_count="+str(loan_req_count) + ":: +++ SLAVE FOUND !!", DebugCat.DEBUG_CAT_CCPROTOCOL)

                # reserve the selected slave
                self.reserved_slave_node_ids.append(selected_node_id)
                self.loanDelivery_sendMsg(
                    selected_node_id,
                    flow.get_payload_metadata()['cluster_manager_id'],
                    old_node_id, late_task, loan_req_count)
            else:
                Debug.PPrint("%f"%self.env.now + "," + self.label + "," +'loanRequest_receivedMsg::, :'  + \
                         ", cm_id="+str(self.node_instance.get_id()) + ", src_id="+str(src_id) + \
                          ", loan_req_count="+str(loan_req_count) + ":: ---- NO SUITABLE SLAVE FOUND !!", DebugCat.DEBUG_CAT_CCPROTOCOL)

                self.loanDelivery_sendMsg(
                    self.SLAVE_NOT_FOUND,
                    flow.get_payload_metadata()['cluster_manager_id'],
                    old_node_id, late_task, loan_req_count)
Example #18
 def cycleDecay(self):
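     # One decay cycle: scale the pheromone level by a ratio derived either dynamically from the
     # normalised system slack or from the static PSALGO_KTIMEDECAY constant.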
     Debug.PPrint("%f"%self.env.now + "," + self.label + "," + "cycleDecay::," + "Enter", DebugCat.DEBUG_CAT_PSALGO)
     
     if(SimParams.PSALGO_DYNAMIC_KTIMEDECAY_ENABLED == True):    # dynamic
         normalised_cum_slack = self.node.calculate_SystemSlack(normalised=True)
         reduction_ratio = self._getDynamicKTimedDecayRatio(normalised_cum_slack)     
             
         self.pheramone_level = float(float(self.pheramone_level) * reduction_ratio)  
         self.track_cycledecay.append(
                                      {
                                       'norm_slack':normalised_cum_slack,
                                       'red_ratio' : reduction_ratio
                                       }                                     
                                      )
     else: # static
         self.pheramone_level = float(float(self.pheramone_level) * SimParams.PSALGO_KTIMEDECAY)
    def DataFlowComplete(self, flow):

        Debug.PPrint(
            "%f" % self.env.now + "," + self.label + "," +
            'DataFlowComplete::, : Enter - flowid=' + str(flow.get_id()),
            DebugCat.DEBUG_CAT_MAPPERINFO)

        # set the status of the task in dep buffer to transmission complete
        dst_node_id = flow.get_destination()
        dst_node = self.RM_instance.node_network.get_Node(dst_node_id)

        # notify the destination node that the data has finished transmitting
        self.env.process(self._interruptNodeAfterDelay(dst_node,
                                                       flow.get_id()))

        return []
Example #20
 def notifyRMTaskCompletion(self, finished_task):
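     # Build a master-slave signalling flow informing the RM that this task has completed,
     # add it to the flow table and fire a table update.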
     
     if(SimParams.MS_SIGNALLING_NOTIFY_TASK_COMPLETE_ENABLE == True):
     
         Debug.PPrint("%f"%self.env.now + "," + self.label + "," +'notifyRMTaskCompletion::, : finished_task='+str(finished_task.get_id()), DebugCat.DEBUG_CAT_MSSIGNALLING)
         
         src_node_id = self.get_id()
         dst_node_id = SimParams.RESOURCEMANAGER_NODEID
         release_time = self.env.now
         nextid = self.resource_manager_instance.flow_table.nextid                    
         route = self.resource_manager_instance.interconnect.getRouteXY(dst_node_id, src_node_id)
         priority = SimParams.NOC_FLOW_MS_SIGNALLING_MAXPRIORITY + nextid        
         basic_latency = self.resource_manager_instance.interconnect.getRouteCostXY(dst_node_id, 
                                                                     src_node_id,
                                                                     SimParams.NOC_PAYLOAD_32BYTES)
         payload = SimParams.NOC_PAYLOAD_32BYTES
         endTime_wrt_BL = release_time + basic_latency
         payload_metadata = {
                             'finished_task_id' : finished_task.get_id(),
                             'node_id' : self.get_id(),
                             'finished_task_starttime' : finished_task.get_taskStartTime(),
                             'finished_task_endtime' : finished_task.get_taskCompleteTime(),
                             }
                     
         newflow = NoCFlow(nextid,
                        finished_task,
                        finished_task.get_id(),
                        None, # list of dst task ids
                        None, # list of dst task ixs
                        src_node_id, 
                        dst_node_id, 
                        route,
                        priority, 
                        None, 
                        basic_latency, 
                        payload,
                        endTime_wrt_BL,
                        type=FlowType.FLOWTYPE_MASTERSLAVESIGNALLING_TASKCOMPLETE,                           
                        payload_metadata=payload_metadata)
         
         self.lock_RM_FLWtbl()
         # add to the flow table
         self.resource_manager_instance.flow_table.addFlow(newflow, release_time, basic_latency)                        
         self.release_RM_FLWtbl()
         
         # update the table        
         self.resource_manager_instance.flow_table.updateTable(fire=True)   
Example #21
    def received_Remapping_Request(self, flow_payload):
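        # Apply a batch of remapping decisions: update the RM's frame-to-node mapping table and the
        # runtime application's task-to-node mapping for every remapped task.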
        #pprint.pprint(flow_payload)
        Debug.PPrint("%f"%self.env.now + "," + self.label + "," + \
                     'received_Remapping_Request::, : task:%d, o_nid:%d, n_nid:%d' % (flow_payload[0]['remapped_task'].get_id(),
                                                                                       flow_payload[0]['remapped_old_node_id'],
                                                                                       flow_payload[0]['remapped_new_node_id']
                                                                                       ), DebugCat.DEBUG_CAT_TASKREMAPPING_NOTIFICATION)

        for each_remapping_tuple in flow_payload:

            task_id = each_remapping_tuple['remapped_task'].get_id()
            wf_id = each_remapping_tuple['remapped_task'].get_wfid()
            strm_id = each_remapping_tuple[
                'remapped_task'].get_video_stream_id()
            frame_gopix = each_remapping_tuple[
                'remapped_task'].get_frameIXinGOP()
            old_node_id = each_remapping_tuple['remapped_old_node_id']
            new_node_id = each_remapping_tuple['remapped_new_node_id']

            ## update task-mapping table on RM
            if wf_id in self.vidstream_frames_mapping_table:
                if (strm_id in self.vidstream_frames_mapping_table[wf_id]):
                    self.vidstream_frames_mapping_table[wf_id][strm_id][
                        'frames_mapping'][frame_gopix] = new_node_id
                else:
                    sys.exit(
                        self.label +
                        "::received_Remapping_Request: error, strm_id does not exist"
                    )
            else:
                sys.exit(
                    self.label +
                    "::received_Remapping_Request: error, wf_id does not exist"
                )

            ## update runtime app data structures
            (updated_task,
             old_node_id) = self.RunTimeApps.updateTaskProcessingCore(
                 frame_gopix, wf_id, strm_id, new_node_id)
            if (updated_task != None) and (old_node_id != None):
                # update runtime app - task to node mapping table
                self.RunTimeApps.updateTaskToNodeMappingTbl(
                    updated_task, old_node_id, new_node_id)

        return []
Example #22
 def sendCompletedPUToOtherNodes(self, task):
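     # Frame-level (HEVC) variant: forward a finished processing unit to the nodes holding its
     # dependent tasks; locally mapped children are written to this node's dependency buffer.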
    
     Debug.PPrint("%.15f"%self.env.now + "," + self.label + "," +'sendCompletedPUToOtherNodes::, Enter (taskid=%s)' % (str(task.get_id())), DebugCat.DEBUG_CAT_CPUINFO_VERBOSE)
     
     # need to maintain temp_distribution list so that we don't duplicate any transmissions
     # e.g. P, B mapped to core-B, I mapped to core-A, so A sends task *twice* to core-B - WRONG !!
     temp_distribution_list = []
     
     # which tasks needs this task ?
     dependent_tasks_list = task.get_expected_data_to_children()
     
     #print "dependent_tasks_list :" , dependent_tasks_list, task
     
     ## check if all of the dep tasks have a mapped core, else raise error
     for each_dep_t, each_dep_data_size in dependent_tasks_list.iteritems():
         node_id = self.getTaskMappedNode(each_dep_t)
         if(node_id == None):
             print(self.label + ' sendCompletedTaskToOtherNodes : each_task_id -'+str(each_dep_t) + ' not yet mapped!, curr_task='+str(task.get_id()))
             sys.exit()
         else:
             
             # send to node, but before sending we need to check if
             # we have already sent it before..
             if(self.node_network.get_Nodes()[node_id].dependencyBuff_IsTaskInBuff(task.get_id()) == False):    # for now, this always returns false            
             
                 # check if child is mapped on same node
                 # if true, put to self nodes dep buffer and mark dep check list in RMTM table
                 if(node_id == self.get_id()):
                     task.set_status(TaskStatus.TASK_DATA_TRANSMISSION_COMPLETE)                        
                     result = self.dependencyBuff_put(task, each_dep_t, each_dep_data_size)
                     
                     self.track_FlowsSentToOwnDepBuff(task.get_id(), each_dep_t)
                     
                     if(result == False):
                         print(self.label + ' sendCompletedTaskToOtherNodes_ET : node-'+ str(self.get_id()) + ", --- dep_buff is FULL ! at " +  str(self.env.now))
                         sys.exit()
                     #else:                                                                                    
                     #    self._markDepsCompleted_InRMTMTbl(task.get_id(), each_dep_t)
     
     if(len(dependent_tasks_list.keys()) > 0):
         self.lock_RM_FLWtbl()
         self.lock_RM_TMtbl()                                
         self.resource_manager_instance.Mapper.addTo_RM_FlowTable_HEVC_FrameLevel([task], self.env.now, self.get_id())
         self.release_RM_FLWtbl()
         self.release_RM_TMtbl()           
    def loanRequest_sendMsg(self,
                            late_task,
                            slave_node_id,
                            current_blocking,
                            current_util,
                            update_flw_table=True):
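        # CCP: broadcast a loan request for the late task to all other cluster managers and open a
        # new entry in the loan-delivery reply list for this request.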

        if (SimParams.CCP_ENABLE == True):
            Debug.PPrint("%f"%self.env.now + "," + self.label + "," +'loanRequest_sendMsg::, : late_task='+str(late_task.get_id()) + \
                         ", cm_id="+str(self.node_instance.get_id()) + \
                          ", loan_req_count="+str(self.loan_req_count), DebugCat.DEBUG_CAT_CCPROTOCOL)

            payload_metadata = {
                'loan_req_count': self.loan_req_count,
                'late_task': late_task,
                'slave_node_id': slave_node_id,
                'cluster_id': self.get_cluster_id(),
                'cluster_manager_id': self.node_instance.get_id(),
                'current_blocking': current_blocking,
                'current_util': current_util,

                # what we really need is the :
                # pri, wccc, rt, exect, wf_level_props
            }

            for each_neighbour_cmid in self.list_cluster_manager_ids:
                if (each_neighbour_cmid != self.node_instance.get_id()):
                    self._sendMsgFlow(
                        finished_task=None,
                        src_node_id=self.node_instance.get_id(),
                        dst_node_id=each_neighbour_cmid,
                        payload=SimParams.CCP_BASIC_MSG_FLOWPAYLOADSIZE,
                        payload_metadata=payload_metadata,
                        type=FlowType.FLOWTYPE_CCP_LOANREQUEST,
                        update_flw_table=False)

            self.list_loan_delivery[self.loan_req_count] = {}

            if (update_flw_table == True):
                self.RMInstance.flow_table.updateTable(fire=True)

            self.loan_req_count += 1
Example #24
 def initialQNSelectionandHormonePropagation(self):
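     # Give every initial queen node its starting hormone level and propagate a hormone dose to its
     # neighbours; a single flow-table update covers all added flows.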
     Debug.PPrint("%f"%self.env.now + "," + self.label + "," + "initialQNSelectionandHormonePropagation::," + "Enter", DebugCat.DEBUG_CAT_PSALGO)
     
     # find nodes with initial queen status
     initial_queen_nodes = [each_node for each_node in self.RMInstance.node_network.get_Nodes() 
                            if (each_node.psalgoprops.node_type == PSAlgorithmNodetype.QUEEN_BEE)]
     
     num_flows_added = []
     for each_node in initial_queen_nodes:
         each_node.psalgoprops.node_type = PSAlgorithmNodetype.QUEEN_BEE
         each_node.psalgoprops.track_qn_count +=1
         each_node.psalgoprops.pheramone_level = SimParams.PSALGO_INITIALHORMONEAMNT_QUEEN
 
         # now propagate hormone to neighbours (same dose structure as in cycleDifferentiation,
         # so that downstream propagation/limiting code finds all the keys it expects)
         dose = {"qn_hop_distance": 0,
                 "p_dose": SimParams.PSALGO_HQN,
                 'qn_pos': each_node.get_id(),
                 "qn_tq_info": [t for t in each_node.get_SystemSlack_MappedTasks()],
                 "node_traversal": [each_node.get_id()]}
         temp_num_flows_added = each_node.psalgoprops.broadcastToNeighbours(dose)
         num_flows_added.extend(temp_num_flows_added)
             
     # update the table (one update for all)
     if(len(num_flows_added) > 0):
         self.RMInstance.flow_table.updateTable(fire=True)   # one update for many additions 
Example #25
    def addFiringRequest(self, time, by_which_flow):
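        # SimPy process: after 'time' has elapsed, take the flow-table and task-mapping locks and
        # re-run updateTable so completed flows are removed and waiting flows can fire.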

        Debug.PPrint(
            "%.15f" % self.env.now + "," + self.label + "," +
            'addFiringRequest:: time_willfire=%.15f, by_which_flow=%d' %
            (time + self.env.now, by_which_flow.get_id()),
            DebugCat.DEBUG_CAT_NOCFLWTBLINFO)

        if (self._checkFiringRequestOverflow(time, by_which_flow) == False):

            # tracking
            self.track_firing_request(time, by_which_flow.get_id())

            yield self.env.timeout(time)  # delay
            self.fire_rqs_outstanding -= 1

            while (self.RMInstance.flow_table.mutex.level == 1):
                pass  # busy wait
            self.RMInstance.flow_table.mutex.put(1)  # obtain lock
            while (self.RMInstance.mutex_tmtbl.level == 1):
                pass  # busy wait
            self.RMInstance.mutex_tmtbl.put(1)  # obtain lock

            # update the table; if nothing fired, call the update once more below
            result = self.updateTable(fire=True)
            if (result == False):
                #            print "FLWTBL_AFTER_UPDATE: "
                #            print "-----------------------"
                #            pprint.pprint(self.flowEntries)
                #            print "-----------------------"

                result = self.updateTable(fire=True)

    #            print "FLWTBL_AFTER_UPDATE: "
    #            print "-----------------------"
    #            pprint.pprint(self.flowEntries)
    #            print "-----------------------"

            self.RMInstance.mutex_tmtbl.get(1)  # release lock
            self.RMInstance.flow_table.mutex.get(1)  # release lock
Example #26
 def cyclePSPropagation(self, hd):
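     # Receive a hormone dose: if the hop distance is below the threshold, absorb the dose, update
     # the nearest-queen list and re-broadcast a decayed dose to the neighbours.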
     Debug.PPrint("%f"%self.env.now + "," + self.label + "," + "cyclePSPropagation::," + "Enter", DebugCat.DEBUG_CAT_PSALGO)
     if(hd["qn_hop_distance"] < SimParams.PSALGO_THRESHOLDHOPCOUNT):
         self.pheramone_level += float(hd["p_dose"])    # receive new hormone dose
         self.updateNearestQueenList(hd) # update the queen list
         # broadcast
         new_hd = {"qn_hop_distance" : hd["qn_hop_distance"]+1, 
                   "p_dose" : float(hd["p_dose"]*SimParams.PSALGO_KHOPDECAY), 
                   'qn_pos': hd['qn_pos'],
                   "qn_tq_info" : hd['qn_tq_info'],
                   "node_traversal" : hd["node_traversal"] + [self.node.get_id()]
                   }
         
         # only send out a new hormone to neighbours if the hop count is below threshold
         if(new_hd["qn_hop_distance"] < SimParams.PSALGO_THRESHOLDHOPCOUNT):           
             num_flows_added = self.broadcastToNeighbours(new_hd)
             # update the table
             if(len(num_flows_added) > 0):
                 self.node.resource_manager_instance.flow_table.updateTable(fire=True)   # one update for many additions        
     else:
         # hop-count threshold reached - do not propagate further
         pass
    def loanDelivery_receivedMsg(self, flow):
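        # CCP: collect loan-delivery replies; once every other cluster manager has answered, pick a
        # slave via the remapping scheme and ask the rejected managers to release theirs.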
        if (SimParams.CCP_ENABLE == True):
            src_id = flow.get_source()
            remote_cluster_manager_id = flow.get_payload_metadata(
            )['reply_from_cluster_manager_id']
            remote_slave_id = flow.get_payload_metadata()['selected_node_id']
            old_node_id = flow.get_payload_metadata()['old_node_id']
            late_task = flow.get_payload_metadata()['late_task']
            loan_req_count = flow.get_payload_metadata()['loan_req_count']

            Debug.PPrint("%f"%self.env.now + "," + self.label + "," +'loanDelivery_receivedMsg::, :'  + \
                         ", cm_id="+str(self.node_instance.get_id()) + ", src_id="+str(src_id) + \
                          ", loan_req_count="+str(loan_req_count), DebugCat.DEBUG_CAT_CCPROTOCOL)

            if (remote_cluster_manager_id
                    not in self.list_loan_delivery[loan_req_count]):
                self.list_loan_delivery[loan_req_count][
                    remote_cluster_manager_id] = remote_slave_id

            # start checking loan del requests only when all replies have been received
            if (len(self.list_loan_delivery[loan_req_count].keys()) == (
                    len(self.list_cluster_manager_ids) - 1)):
                # check if all replies have been received
                selected_cluster_manager_id = self.node_instance.taskremapping_decent_scheme_instance.taskReMappingDecentSchImpl_CCP_LoanReply(
                    self.list_loan_delivery[loan_req_count], old_node_id,
                    late_task)

                # rejected cluster managers
                #rej_cm_list = [(nid, self.list_loan_delivery[nid]) for nid in self.list_loan_delivery.keys() if nid != selected_cluster_manager_id]
                rej_cm_list = [
                    (cmid, self.list_loan_delivery[loan_req_count][cmid])
                    for cmid, slaveid in
                    self.list_loan_delivery[loan_req_count].iteritems()
                    if slaveid != self.SLAVE_NOT_FOUND
                ]

                # notify all other cluster managers to release their resource
                self.loanRelease_sendMsg(rej_cm_list, loan_req_count)
    def MSSignallingFlowComplete(self, flow, type):

        Debug.PPrint(
            "%f" % self.env.now + "," + self.label + "," +
            'MSSignallingFlowComplete::, : Enter - flowid=' +
            str(flow.get_id()), DebugCat.DEBUG_CAT_MAPPERINFO)

        if (type == FlowType.FLOWTYPE_MASTERSLAVESIGNALLING_TASKCOMPLETE):
            payload_metadata = flow.get_payload_metadata()
            finished_task_id = payload_metadata['finished_task_id']
            node_id = payload_metadata['node_id']
            if (finished_task_id in self.RM_instance.task_mapping_table):
                self.RM_instance.task_mapping_table[finished_task_id][
                    'status'] = TaskStatus.TASK_COMPLETED

            # update the volatile task-mapping table
            self.volatileTaskMappingTable_RemoveSpecificTask(
                finished_task_id, node_id)

        elif (type == FlowType.FLOWTYPE_MASTERSLAVESIGNALLING_FLOWCOMPLETE):
            # mark dependencies
            # this completed flow may serve two dependencies - need to take this into account
            src_task_id = flow.get_payload_metadata(
            )['finished_flow_src_task_id']
            dst_task_ids = flow.get_payload_metadata(
            )['finished_flow_dst_task_ids']

            for dst_task_id in dst_task_ids:
                if (src_task_id
                        not in self.RM_instance.task_mapping_table[dst_task_id]
                    ['deps_completed']):
                    self.RM_instance.task_mapping_table[dst_task_id][
                        'deps_completed'].append(src_task_id)
        else:
            sys.exit(self.label + ":: MSSignallingFlowComplete : error")

        return []
    def loanDelivery_sendMsg(self, selected_node_id, dst_cm_id, old_node_id,
                             late_task, loan_req_count):
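        # CCP: reply to the requesting cluster manager with the selected slave node (or
        # SLAVE_NOT_FOUND), sent as a loan-delivery flow after a small delay.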
        #print "loanDelivery_sendMsg: enter"
        if (SimParams.CCP_ENABLE == True):
            Debug.PPrint("%f"%self.env.now + "," + self.label + "," +'loanDelivery_sendMsg::, :'  + \
                         ", cm_id="+str(self.node_instance.get_id()) + ", dst_cm_id=" + str(dst_cm_id) + \
                          ", loan_req_count="+str(loan_req_count), DebugCat.DEBUG_CAT_CCPROTOCOL)
            payload_metadata = {
                'loan_req_count': loan_req_count,
                'late_task': late_task,
                'old_node_id': old_node_id,
                'selected_node_id': selected_node_id,
                'reply_from_cluster_id': self.get_cluster_id(),
                'reply_from_cluster_manager_id': self.node_instance.get_id(),
            }

            self.env.process(
                self._sendMsgAfterDelay(
                    finished_task=None,
                    src_node_id=self.node_instance.get_id(),
                    dst_node_id=dst_cm_id,
                    payload=SimParams.CCP_BASIC_MSG_FLOWPAYLOADSIZE,
                    payload_metadata=payload_metadata,
                    type=FlowType.FLOWTYPE_CCP_LOANDELIVERY))
Example #30
    def updateTable(self, fire=False):
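        # Walk the flow entries in priority order: remove flows that finished transmitting,
        # deactivate flows that gained an active interferer, reactivate flows whose interferers are
        # all inactive (optionally scheduling a firing request), then wake the RM for finished flows.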

        Debug.PPrint(
            "%.15f" % self.env.now + "," + self.label + "," + 'updateTable::',
            DebugCat.DEBUG_CAT_NOCFLWTBLINFO)

        finished_flw_ids = []
        fired = False

        # create list of flows in decreasing order of priority
        #         flows_list =[]
        #         for fte_key, fte in self.flowEntries.iteritems():
        #             flows_list.append((fte_key, fte))

        flows_list = self.flowEntries.items()
        flows_list_pri_sorted = sorted(
            flows_list,
            key=lambda x: x[1].getFlow().get_priority(),
            reverse=False)

        # check if the flow table entry has completed
        for each_fte in flows_list_pri_sorted:
            each_fte_flow = each_fte[1]
            each_fte_flow_key = each_fte[0]

            # active flows
            if (each_fte_flow.active == True):
                # checks whether current flow completed transmission
                # if true, remove it from the main flow table

                #                 #### debug ###
                #                 if(each_fte_flow_key == 1256):
                #                     print "1256:: i_flws :"
                #                     print each_fte_flow
                #                     print [k for k in self.flowEntries.keys()]
                #                     print "%.15f" % self._trunc(each_fte_flow.getRemainingTime(), 10)
                #                     print "%.15f" % self._trunc(self.env.now-each_fte_flow.getLastActivationTime(), 10)
                #                 #############

                #if(round(each_fte_flow.getRemainingTime(),11) <= round(self.env.now-each_fte_flow.getLastActivationTime(), 11)):
                if (self._trunc(each_fte_flow.getRemainingTime(), 10) <=
                        self._trunc(
                            self.env.now -
                            each_fte_flow.getLastActivationTime(), 10)):
                    new_released_tasks = self.removeFlow(each_fte_flow)
                    #print [k for k in self.flowEntries.keys()]
                    if (len(new_released_tasks) > 0):
                        finished_flw_ids.append(
                            each_fte_flow.getFlow().get_id())
                    # add task to the dep buffer of the dst node
                    self._flowComplete_putToNodeDepBuff(
                        each_fte_flow.getFlow())

                else:  ## TODO: ASK JH

                    # check if any of the intersecting flows have become active
                    # if true, update current flow as inactive and update its remaining time
                    each_fte_interferrence_flows = each_fte_flow.getInterferenceFlows(
                    )
                    env_now_trunc = self._trunc(self.env.now, 10)

                    for fte in each_fte_interferrence_flows:
                        fte_isActive = fte.active
                        if (
                            (fte_isActive == True) or
                            (fte_isActive == False and (self._trunc(
                                fte.getReleaseTime(), 10) == env_now_trunc))
                        ):  # in case the interfering flow was released just now
                            #each_fte_flow.setInactive()
                            self.flowEntries[each_fte_flow_key].setInactive()
                            #rt = float(each_fte_flow.getRemainingTime() - (self.env.now - each_fte_flow.getLastActivationTime()))
                            rt = self._trunc(
                                each_fte_flow.getRemainingTime() -
                                (self.env.now -
                                 each_fte_flow.getLastActivationTime()), 11)
                            #each_fte_flow.setRemainingTime(rt)
                            self.flowEntries[
                                each_fte_flow_key].setRemainingTime(rt)

                            break

            # inactive flows
            else:

                # checks whether all interfering flows became inactive (or terminated)
                # if true, set current flow as active

                each_fte_interferrence_flows = each_fte_flow.getInterferenceFlows(
                )

                ### debug ###
                #                 if(each_fte_flow_key == 195):
                #                     print "195:: i_flws :"
                #                     pprint.pprint(each_fte_interferrence_flows)
                #                     print str(finished_flw_ids)
                #############

                #                 allInactive = True
                #                 for fte in each_fte_interferrence_flows:
                #                     if(fte.isActive() == True):
                #                         allInactive = False
                #                         #break   # TODO: ASK JHR

                # are all interfering flows inactive?
                allInactive = not any(fte.active for fte in each_fte_interferrence_flows)

                if (allInactive == True):
                    #each_fte_flow.setActive()
                    self.flowEntries[each_fte_flow_key].setActive()
                    if (fire == True):

                        #                         #### debug ###
                        #                         if(each_fte_flow_key == 1256):
                        #                             print "firing---"
                        #                             print "1256:: i_flws :"
                        #                             print each_fte_flow
                        #                             print "%.15f" % each_fte_flow.getRemainingTime()
                        #                             print "%.15f" % self.env.now

                        fire_time = (each_fte_flow.getRemainingTime() +
                                     0.0000000001)
                        #print "going to check _checkFiringRequestOverflow"
                        if (self._checkFiringRequestOverflow(
                                fire_time, each_fte_flow.getFlow()) == False):
                            self.env.process(
                                self.addFiringRequest(fire_time,
                                                      each_fte_flow.getFlow()))
                            fired = True
                            self.fire_rqs_outstanding += 1

#        print "FLWTBL_AFTER_UPDATE: "
#        print "-----------------------"
#        pprint.pprint(self.flowEntries)
#        print "-----------------------"

        if (len(finished_flw_ids) > 0):
            #self.interruptRMAfterDelay(float(self.env.now+SimParams.SYNCH_TIME_OFFSET), -1)
            when_to_interrupt = self.env.now + SimParams.SYNCH_TIME_OFFSET
            self.env.process(
                self.interruptRMAfterDelay(when_to_interrupt,
                                           finished_flw_ids))

        # update idle links
        self.updateLinkStatus()

        # return result
        if (fired == False):
            if (self.fire_rqs_outstanding == 0):
                #sys.exit("nothing fired")
                #print "=========================>>>>>> nothing fired"
                return False
            else:
                return True
        else:
            return True