Code example #1
File: net_plumber.py  Project: oceanly020/hassel-op
    def __set_pipeline_dependencies_mp(self, rule):
        '''
      @rule is newly added rule
      '''
        #setting up threads
        dataQ = jQueue()
        resultQ = jQueue()
        sigterm = Event()
        processess = []
        for i in range(NUM_THREADS):
            p = set_pipeline_process(rule, dataQ, resultQ, sigterm)
            processess.append(p)
            p.start()

        for port in rule.output_ports:
            next_ports = self.get_dst_end_of_link(port)
            for next_port in next_ports:
                potential_next_rules = self.__get_rules_by_input_port(
                    next_port)
                for r in potential_next_rules:
                    dataQ.put((r, port, next_port, False))
        for port in rule.input_ports:
            previous_ports = self.get_src_end_of_link(port)
            for previous_port in previous_ports:
                potential_back_rules = self.__get_rules_by_output_port(
                    previous_port)
                for r in potential_back_rules:
                    dataQ.put((r, port, previous_port, True))

        dataQ.join()
        sigterm.set()
        count = NUM_THREADS
        while (count > 0):
            next_result = resultQ.get()
            if next_result == None:
                count -= 1
                continue
            (survived_hs, node_id, rule_port, r_port, back) = next_result
            r = self.node_by_id[node_id]
            if (back):
                r.set_next_in_pipeline(rule, survived_hs, r_port, rule_port)
                rule.set_previous_in_pipeline(r, survived_hs, rule_port,
                                              r_port)
            else:
                rule.set_next_in_pipeline(r, survived_hs, rule_port, r_port)
                r.set_previous_in_pipeline(rule, survived_hs, r_port,
                                           rule_port)

        for p in processess:
            p.join()
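All of the *_mp methods shown on this page share the same fan-out/fan-in protocol: the caller fills a JoinableQueue (jQueue) with tasks, blocks on dataQ.join() until every task has been marked done, sets the sigterm event, and then drains resultQ until it has counted NUM_THREADS None sentinels before joining the worker processes. The worker class set_pipeline_process is not part of these excerpts; the following is only a minimal sketch, under the assumption that it is a multiprocessing.Process subclass following that protocol. The helper _overlap_placeholder and the 0.1-second poll interval are illustrative stand-ins, not the project's actual header-space intersection code.

from multiprocessing import Process
from Queue import Empty  # Python 2 name; on Python 3 this is "from queue import Empty"


def _overlap_placeholder(new_rule, other_rule, new_rule_port, other_port, back):
    # stand-in for the real header-space intersection between the two rules;
    # returning None means "no surviving header space, report nothing"
    return None


class set_pipeline_process(Process):
    def __init__(self, rule, dataQ, resultQ, sigterm):
        Process.__init__(self)
        self.rule = rule
        self.dataQ = dataQ
        self.resultQ = resultQ
        self.sigterm = sigterm

    def run(self):
        while True:
            try:
                (r, rule_port, r_port, back) = self.dataQ.get(timeout=0.1)
            except Empty:
                # the caller only sets sigterm after dataQ.join() returns, so an
                # empty queue plus sigterm means no more work will arrive: emit
                # the None sentinel the caller counts, then exit
                if self.sigterm.is_set():
                    self.resultQ.put(None)
                    return
                continue
            survived_hs = _overlap_placeholder(self.rule, r, rule_port, r_port, back)
            if survived_hs is not None:
                self.resultQ.put((survived_hs, r.node_id, rule_port, r_port, back))
            # task_done() is what eventually lets dataQ.join() in the caller return
            self.dataQ.task_done()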
Code example #2
File: net_plumber.py  Project: alem0lars/hassel
    def __set_pipeline_dependencies_mp(self, rule):
      '''
      @rule is newly added rule
      '''
      #setting up threads
      dataQ = jQueue()
      resultQ = jQueue()
      sigterm = Event()
      processess = []
      for i in range(NUM_THREADS):
        p = set_pipeline_process(rule,dataQ,resultQ,sigterm)
        processess.append(p)
        p.start()
      
      for port in rule.output_ports:
        next_ports = self.get_dst_end_of_link(port)
        for next_port in next_ports:
          potential_next_rules = self.__get_rules_by_input_port(next_port)
          for r in potential_next_rules:
            dataQ.put((r,port,next_port,False))
      for port in rule.input_ports:
        previous_ports = self.get_src_end_of_link(port)
        for previous_port in previous_ports: 
          potential_back_rules = self.__get_rules_by_output_port(previous_port)
          for r in potential_back_rules:
            dataQ.put((r,port,previous_port,True))
            
      dataQ.join()
      sigterm.set()
      count = NUM_THREADS
      while (count > 0):
        next_result = resultQ.get()
        if next_result == None:
          count -= 1
          continue
        (survived_hs,node_id,rule_port,r_port,back) = next_result
        r = self.node_by_id[node_id]
        if (back):
          r.set_next_in_pipeline(rule,survived_hs,r_port,rule_port)
          rule.set_previous_in_pipeline(r,survived_hs,rule_port,r_port)
        else:
          rule.set_next_in_pipeline(r,survived_hs,rule_port,r_port)
          r.set_previous_in_pipeline(rule,survived_hs,r_port,rule_port)

      for p in processess:
        p.join()
Code example #3
File: net_plumber.py  Project: alem0lars/hassel
 def __route_source_flow_mp(self, rule):
   '''
   Note: node should already have all the pipeline and influence states 
   set up before calling this method.
   @rule: the rule for which we want to route flow
   '''
   # taskQ: a queue of tasks.
   # each task is (prev_rule_pipeline_to_rule, source_flow).
   # source_flow should be routed from prev_rule to rule
   print "route source flow"
   taskQ = jQueue()
   resultQ = jQueue()
   # create thread
   processess = []
   sigterm = Event()
   for i in range(NUM_THREADS):
     p = route_source_flow_process(taskQ,resultQ,sigterm)
     processess.append(p)
     p.start()
     
   if rule.__class__ == SourceNode:
     for pipeline in rule.next_in_pipeline:
       taskQ.put((pipeline,rule.source_flow[0]))
   elif rule.__class__ == RuleNode:
     for (r,h,p1,p2) in rule.previous_in_pipeline:
       for pipeline in r.pipelines_to(rule):
         for s_flow in r.source_flow:
           taskQ.put((pipeline,s_flow))
   
   taskQ.join()
   sigterm.set()
   count = NUM_THREADS
   while (count > 0):
     next_result = resultQ.get()
     if next_result == None:
       count -= 1
       continue
     (node_id,new_source_flow) = next_result
     r = self.node_by_id[node_id]
     r.source_flow.append(new_source_flow)
     
   for p in processess:
     p.join()
   print "end: route source flow"
Code example #4
File: net_plumber.py  Project: alem0lars/hassel
 def __set_influences_mp(self, rule):
   '''
   adds influence of all higher ranked rules to @rule.
   add influence of @rule to all lower ranked rules.  
   @rule is newly added rule
   '''
   #setting up threads
   dataQ = jQueue()
   resultQ = jQueue()
   sigterm = Event()
   processess = []
   for i in range(NUM_THREADS):
     p = set_influence_process(rule,dataQ,resultQ,sigterm)
     processess.append(p)
     p.start()
     
   table = rule.table
   higherPriority = True
   for r in self.tables[table]:
     if rule.node_id == r.node_id:
       higherPriority = False
     else:
       dataQ.put((r,higherPriority))
   
   #waiting for threads to be done.
   dataQ.join()
   sigterm.set()
   count = NUM_THREADS
   while (count > 0):
     next_result = resultQ.get()
     if next_result == None:
       count -= 1
       continue
     (rule_id,higher_priority,com_hs,com_ports) = next_result
     r = self.node_by_id[rule_id]
     if (higher_priority):
       r.influenced_on_rule(rule)
       rule.affected_by_rule(r, com_hs, com_ports)
     else:
       rule.influenced_on_rule(r)
       r.affected_by_rule(rule, com_hs, com_ports)
       
   for p in processess:
     p.join()
Code example #5
    def __route_source_flow_mp(self, rule):
        '''
        Note: node should already have all the pipeline and influence states
        set up before calling this method.
        @rule: the rule for which we want to route flow
        '''
        # taskQ: a queue of tasks.
        # each task is (prev_rule_pipeline_to_rule, source_flow).
        # source_flow should be routed from prev_rule to rule
        print "route source flow"
        taskQ = jQueue()
        resultQ = jQueue()
        # create thread
        processess = []
        sigterm = Event()
        for i in range(NUM_THREADS):
            p = route_source_flow_process(taskQ,resultQ,sigterm)
            processess.append(p)
            p.start()

        if rule.__class__ == SourceNode:
            for pipeline in rule.next_in_pipeline:
                taskQ.put((pipeline,rule.source_flow[0]))
        elif rule.__class__ == RuleNode:
            for (r,h,p1,p2) in rule.previous_in_pipeline:
                for pipeline in r.pipelines_to(rule):
                    for s_flow in r.source_flow:
                        taskQ.put((pipeline,s_flow))

        taskQ.join()
        sigterm.set()
        count = NUM_THREADS
        while (count > 0):
            next_result = resultQ.get()
            if next_result == None:
                count -= 1
                continue
            (node_id,new_source_flow) = next_result
            r = self.node_by_id[node_id]
            r.source_flow.append(new_source_flow)

        for p in processess:
            p.join()
        print "end: route source flow"
Code example #6
    def __set_influences_mp(self, rule):
        '''
        adds influence of all higher ranked rules to @rule.
        add influence of @rule to all lower ranked rules.
        @rule is newly added rule
        '''
        #setting up threads
        dataQ = jQueue()
        resultQ = jQueue()
        sigterm = Event()
        processess = []
        for i in range(NUM_THREADS):
            p = set_influence_process(rule,dataQ,resultQ,sigterm)
            processess.append(p)
            p.start()

        table = rule.table
        higherPriority = True
        for r in self.tables[table]:
            if rule.node_id == r.node_id:
                higherPriority = False
            else:
                dataQ.put((r,higherPriority))

        #waiting for threads to be done.
        dataQ.join()
        sigterm.set()
        count = NUM_THREADS
        while (count > 0):
            next_result = resultQ.get()
            if next_result == None:
                count -= 1
                continue
            (rule_id,higher_priority,com_hs,com_ports) = next_result
            r = self.node_by_id[rule_id]
            if (higher_priority):
                r.influenced_on_rule(rule)
                rule.affected_by_rule(r, com_hs, com_ports)
            else:
                rule.influenced_on_rule(r)
                r.affected_by_rule(rule, com_hs, com_ports)

        for p in processess:
            p.join()
Code example #7
File: __init__.py  Project: Kasyx709/BlackJack
    def build_house(cls):
        """
        Builds a Server Queue Manager(SQM) to control player and dealer turns.
        Assigns address to localhost with port 0 to ensure OS will assign an open port
        Authentication is key upon MetaClass initialization.
        Method returns SQM wrapped inside of a thread
        :return: Thread(target=BaseManager(address=('localhost', 0), authkey=cls.authkey), args=(),)
        """
        turn_queue = jQueue()

        class HouseManager(BaseManager):
            pass

        HouseManager.register('cards', callable=lambda: turn_queue)
        HouseManager.register('start_server', callable=lambda _: _.serve_forever())
        manager = HouseManager(address=('localhost', cls.port), authkey=MetaHouse.token)
        server = manager.get_server()
        setattr(cls, 'server', Thread(target=server.serve_forever, args=(), ))
        return
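For context, the server-queue pattern used by build_house can be reproduced standalone. The snippet below is a hypothetical usage sketch, not code from the Kasyx709/BlackJack project: project-specific details such as cls.port and MetaHouse.token are replaced by an OS-assigned port and an invented b'secret' authkey, and the client attachment step is an assumption about how the registered 'cards' queue would be consumed.

from multiprocessing import JoinableQueue
from multiprocessing.managers import BaseManager
from threading import Thread

turn_queue = JoinableQueue()


class HouseManager(BaseManager):
    pass


# expose the queue under the name 'cards', as build_house does above
HouseManager.register('cards', callable=lambda: turn_queue)

# port 0 lets the OS pick a free port; the bound address ends up on server.address
manager = HouseManager(address=('localhost', 0), authkey=b'secret')
server = manager.get_server()
server_thread = Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()

# a client (normally another process, e.g. a player) attaches with the same
# registration and receives a proxy to the shared queue
client = HouseManager(address=server.address, authkey=b'secret')
client.connect()
cards = client.cards()
cards.put('ace of spades')
print(cards.get())  # -> 'ace of spades'
cards.task_done()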