Example #1
    def __init__(self, process: Subprocess, chunk_size=CHUNK_SIZE_DEFAULT):
        if not isinstance(process, Subprocess):
            raise TypeError("process must be Subprocess")

        if not process.is_alive():
            raise ValueError("Process wasn't working.")

        if chunk_size <= 0:
            raise ValueError("Chunk size must be > 0.")

        if process.stdout is None and process.stdin is None:
            raise RuntimeError("Process I/O is unavailable.")

        self.process = process
        self.chunk_size = chunk_size
        self.read_buffer_cache = b""

        if self.process.stdin is not None:
            self.queue_write = Queue()
            self.thread_write = Thread(target=self._write)
            self.thread_write.start()
        else:
            self.queue_write = None
            self.thread_write = None

        if self.process.stdout is not None:
            self.queue_read = Queue()
            self.thread_read = Thread(target=self._read)
            self.thread_read.start()
        else:
            self.queue_read = None
            self.thread_read = None
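The reader and writer threads above target `self._read` and `self._write`, which are not part of this snippet. A minimal sketch of what those loops might look like, assuming the wrapper simply shuttles byte chunks between the subprocess pipes and the queues (the `None` sentinel and the stop conditions are assumptions, not the original implementation):

    def _write(self):
        # Drain the write queue and forward each chunk to the subprocess stdin.
        while self.process.is_alive():
            data = self.queue_write.get()
            if data is None:  # assumed sentinel used to stop the thread
                break
            self.process.stdin.write(data)
            self.process.stdin.flush()

    def _read(self):
        # Read fixed-size chunks from the subprocess stdout into the read queue.
        while self.process.is_alive():
            chunk = self.process.stdout.read(self.chunk_size)
            if not chunk:  # EOF: the process closed its stdout
                break
            self.queue_read.put(chunk)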
Example #2
 def __init__(self, name=None):
     self.name = name
     self._state = None
     ctx = SpawnContext()
     self.alive = Value('b', True)
     self.in_queue = Queue(ctx=ctx, maxsize=120)
     self.out_queue = Queue(ctx=ctx, maxsize=110)
Example #3
    def process_init(self):
        self.event_queue = Queue()
        self.event_queue_name = str(id(self))
        from compmake.plugins.backend_pmake.pmake_manager import PmakeManager
        PmakeManager.queues[self.event_queue_name] = self.event_queue

        # info('Starting %d processes' % self.num_processes)

        self.subs = {}  # name -> sub
        # available + processing + aborted = subs.keys
        self.sub_available = set()
        self.sub_processing = set()
        self.sub_aborted = set()

        self.signal_queue = Queue()

        db = self.context.get_compmake_db()
        storage = db.basepath  # XXX:
        logs = os.path.join(storage, 'logs')
        for i in range(self.num_processes):
            name = 'w%02d' % i
            write_log = os.path.join(logs, '%s.log' % name)
            make_sure_dir_exists(write_log)
            signal_token = name
            self.subs[name] = PmakeSub(name, 
                                       signal_queue=self.signal_queue,
                                       signal_token=signal_token,
                                       write_log=write_log)
        self.job2subname = {}
        self.subname2job = {}
        # all are available at the beginning
        self.sub_available.update(self.subs)

        self.max_num_processing = self.num_processes
Example #4
File: engine.py Project: zoulianmp/HQCMeas
    def force_stop(self):
        self._stop_requested = True
        # Just in case the user calls this directly. Will signal all threads to
        # stop (save _com_thread).
        self._stop.set()
        self._log_queue.put(None)
        self._monitor_queue.put((None, None))

        # Set _force_stop to stop _com_thread.
        self._force_stop.set()

        # Terminate the process and make sure all threads stopped properly.
        self._process.terminate()
        self._log_thread.join()
        self._monitor_thread.join()
        self._com_thread.join()
        self.active = False
        if self._processing.is_set():
            self.done = ('INTERRUPTED', 'The user forced the system to stop')
            self._processing.clear()

        # Discard the queues as they may have been corrupted when the process
        # was terminated.
        self._log_queue = Queue()
        self._monitor_queue = Queue()
Example #5
    def __init__(self, config, event_queue, debug, rib, policy_handler, test,
                 no_notifications, rib_timing, notification_timing):
        super(LoopDetector, self).__init__(config, event_queue, debug)

        self.config = config

        self.cib = CIB(self.config.id)
        self.rib = rib
        self.policy_handler = policy_handler

        # mapping of participant and prefix to list of forbidden forward participants
        self.forbidden_paths = defaultdict(lambda: defaultdict(list))

        self.run = False
        self.listener = Listener(
            (self.config.sdx.address, self.config.loop_detector.port),
            authkey=None)
        self.msg_in_queue = Queue(1000)
        self.msg_out_queue = Queue(1000)

        self.no_notifications = no_notifications

        self.rib_timing = rib_timing
        if self.rib_timing:
            self.rib_timing_file = 'rib_timing_' + str(int(time())) + '.log'

        self.notification_timing = notification_timing
        if self.notification_timing:
            self.notification_timing_file = 'notification_timing_' + str(
                int(time())) + '.log'
Example #6
 def _setup(self):
     if isinstance(self._instruction, I.ATM):
         self._code = self._instruction.code
         self._instruction_pipelines.append(Pipeline(self._code.instructions, self._tables))
         self._atomic_process = Process(target=self.run_atomic)
         self._is_atomic_enabled = True
     elif isinstance(self._instruction, I.SEQ):
         self._code = self._instruction.code
         self._instruction_pipelines.append(Pipeline(self._code.instructions, self._tables))
         self._sequential_ingress_process = Process(target=self.run_sequential_ingress)
         self._sequential_egress_process = Process(target=self.run_sequential_egress)
         self._metadata_queue = Queue()
         self._is_sequential_enabled = True
     elif isinstance(self._instruction, I.CNC):
         # Note: CNC can't have PUSH/POP instructions in its code blocks. They violate the concurrency invariant.
         self._codes = self._instruction.codes
         self._modified_locations = []
         self._modified_reserved_fields = []
         self._modified_fields = []
         for code in self._codes:
             self._instruction_pipelines.append(Pipeline(code.instructions, self._tables))
             self._modified_locations.append(get_modified_locations(code.instructions))
             self._modified_reserved_fields.append(get_modified_reserved_fields(code.instructions))
             self._modified_fields.append(get_modified_fields(code.instructions, code.argument_fields))
         self._concurrent_ingress_process = Process(target=self.run_concurrent_ingress)
         self._concurrent_egress_process = Process(target=self.run_concurrent_egress)
         self._metadata_queue = Queue()
         self._is_concurrent_enabled = True
     else:
         raise RuntimeError("Unsupported instruction type")
Example #7
 def setUp(self):
     self.notif_queue = Queue(1)
     self.error_queue = Queue()
     self.component = Component(self.notif_queue._reader,
                                CommonErrorStrategy(),
                                self.error_queue,
                                PostgresConnector(_POSTGRES_DSN))
     self.component.log = MagicMock()
Example #8
 def setUp(self):
     pg_connector = PostgresConnector(_POSTGRES_DSN)
     self.notif_queue = Queue(1)
     self.listener = PostgresNotificationListener(
         pg_connector, _NOTIF_CHANNEL, self.notif_queue,
         CommonErrorStrategy(), Queue(), fire_on_start=False
     )
     self.listener.log = MagicMock()
Example #9
 def setUp(self):
     self.filter_queue = Queue()
     self.message_queue = Queue()
     self.client_cfg = {"ip_address": b"127.0.0.1",
                        "filename": b"/dev/null",
                        "verbose": 0,
                        "port": "1234",
                        }
     self.stop_event = Event()
     self.handler = DLTMessageHandler(self.filter_queue, self.message_queue, self.stop_event, self.client_cfg)
Example #10
    def __init__(self, pull_interval=5):

        self.input = None
        self.filter = None
        self.output = None

        # for input write and filter read
        self.iqueue = Queue()
        # for filter write and output read
        self.oqueue = Queue()

        self.pull_interval = pull_interval

        self.__init_all()
Example #11
 def CreateSteamLine(self):
     '''
     create stream line
     @return: a list which contains all processes and streamline output queue
     '''
     processUnitList = []
     inputQueue = None
     outputQueue = None
     processUnit = None
     if self.streamLineTemplate:
         for (index, processTemplate) in enumerate(self.streamLineTemplate):
             outputQueue = Queue(maxsize=self.processQueueSize)
             pCount = processTemplate.get('pCount')  # get process number
             if index == 0:
                 for ind in xrange(pCount):
                     processUnit = ProcessUnit(
                         processTemplate,
                         outputQueue=outputQueue)  #create producer process
                     processUnitList.append(processUnit)
             else:
                 for ind in xrange(pCount):
                     processUnit = ProcessUnit(
                         processTemplate,
                         inputQueue=inputQueue,
                         outputQueue=outputQueue)  #create consumer process
                     processUnitList.append(processUnit)
             inputQueue = outputQueue
     return processUnitList, outputQueue
Example #12
def main():
    result_queue = Queue()
    crawler = CrawlerWorker(CanberraWealtherSpider(), result_queue)
    crawler.start()
    for item in result_queue.get():
        #print datetime.datetime.now(),item
        print item
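`CrawlerWorker` is not listed here, but the single `result_queue.get()` followed by iteration implies that the worker collects every scraped item into a list and puts that list on the queue in one call. A self-contained sketch of that pattern (the class and the `job` callable below are hypothetical stand-ins, not the original CrawlerWorker):

from multiprocessing import Process, Queue

class QueueWorker(Process):
    # Hypothetical stand-in: gathers results in the child process and ships
    # them back through a single queue.put(), so the parent needs one get().
    def __init__(self, job, result_queue):
        super(QueueWorker, self).__init__()
        self.job = job
        self.result_queue = result_queue

    def run(self):
        items = [self.job(i) for i in range(10)]  # placeholder workload
        self.result_queue.put(items)

def square(i):
    return i * i

if __name__ == "__main__":
    queue = Queue()
    worker = QueueWorker(square, queue)
    worker.start()
    for item in queue.get():  # one get() returns the whole list
        print(item)
    worker.join()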
Example #13
    def _calculate_rmse_mp(self, population, process_count):
        i = 0
        process_pop = dict()
        while i < len(population):
            for j in range(process_count):
                if str(j) not in process_pop.keys():
                    process_pop[str(j)] = []

                if i < len(population):
                    process_pop[str(j)].append(population[i])
                    i += 1

        final_population = []
        queue = Queue()
        processes = []
        for i in range(process_count):
            pop = process_pop[str(i)]

            process = Process(target=self._calculate_rmse,
                              name="%d" % i,
                              args=(pop, queue))
            process.start()
            processes.append(process)

        for i in range(process_count):
            final_population += queue.get()

        for process in processes:
            process.join()

        return final_population
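The worker target `_calculate_rmse` is not included above; for the collection loop to finish, each process has to put exactly one list on the shared queue. A minimal sketch under that assumption (the `_rmse` scoring helper is hypothetical):

    def _calculate_rmse(self, population, queue):
        # Score each individual, then hand the whole sub-population back in a
        # single put(), matching the one queue.get() per process above.
        for individual in population:
            individual.rmse = self._rmse(individual)  # assumed scoring helper
        queue.put(population)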
Example #14
File: process_pool.py Project: fedjaz/Labs
    def map(self, target, args):
        processes = SimpleQueue()
        outputs = [None] * len(args)
        for i in args:
            q = Queue(1, ctx=multiprocessing.get_context())
            p = Process(target=target, args=(i, q))
            processes.put((p, q))

        active = []
        is_failed = False
        for i in range(0, min(self.max_processes, len(args))):
            p = processes.get()
            active.append(p)
            p[0].start()

        while active or not processes.empty():
            for i in active:
                if not i[0].is_alive():
                    active.remove(i)
                    res = i[1].get()
                    outputs[res.index] = res
                    if res.result != TestResults.OK:
                        is_failed = True
            while not processes.empty() and len(active) < self.max_processes:
                p = processes.get()
                if not is_failed:
                    p[0].start()
                    active.append(p)

        return outputs
Example #15
 def compute_sp(self):
     from Queue import Queue
     queue = Queue()
     datalen = len(self.D['coords'])
     self(queue, 0, datalen, True, False)
     self(queue, 0, datalen, False, False)
     return queue.get() + queue.get()
Example #16
def MKLMultiProcessing(MKLData,CORES,NSOURCES,toPredict):

    Scores=np.zeros([(CORES*(NSOURCES/CORES)),toPredict],dtype=np.object)
    Labels=np.zeros([(CORES*(NSOURCES/CORES)),toPredict],dtype=np.object)
    queues = [Queue() for i in range(CORES)]
    args = [(MKLData,(i*int(NSOURCES/CORES)), int(NSOURCES/CORES)*(i+1),queues[i],i) for i in range(CORES)]
    #print args
    jobs = [Process(target=TrainMKLClassifier, args=(a)) for a in args]
    for j in jobs: j.start()
    i=0
    k=0
    for q in queues: 
        item=q.get()
        l= item[0]
#        print l
        val = l[1]
        lab= l[2]
        for j in range (0,val.shape[0]):
            Scores[k,:]=val[j]
            Labels[k,:]=lab[j]
            k=k+1
        i=i+1
    for j in jobs: j.join()
    df_scores=pd.DataFrame(Scores.T)
    df_testlabels=pd.DataFrame(Labels.T)
    y_score=df_scores.as_matrix()
    y_test=df_testlabels.as_matrix()    
    return y_score,y_test
Example #17
File: mp_ext.py Project: XinliYu/utix
def parallel_process_by_queue(num_p,
                              data_iter,
                              target,
                              args,
                              ctx: BaseContext = None,
                              task_unit_size=5000,
                              print_out=__debug__):
    if isinstance(target, MPTarget):
        target.use_queue = True
    if ctx is None:
        ctx = get_context('spawn')
    iq = Queue(ctx=ctx)
    oq: Manager = ctx.Manager().Queue()

    tic(f"Creating input queue with task unit size {task_unit_size}",
        verbose=print_out)
    cnt_task_unit = 0
    for item in tqdm(slices__(data_iter, task_unit_size)):
        iq.put(item)
        cnt_task_unit += 1
    jobs = [None] * num_p
    for i in range(num_p):
        jobs[i] = ctx.Process(target=target, args=(i, iq, oq) + args)
    toc()

    tic(f"Working on {cnt_task_unit} task units with {num_p} processes",
        verbose=print_out)
    start_and_wait_jobs(jobs)

    out = []
    while not oq.empty():
        out.append(oq.get_nowait())
    toc()
    return out
Example #18
File: queue.py Project: cgarciae/pypeln
    def __init__(self, maxsize: int = 0, total_sources: int = 1):
        super().__init__(maxsize=maxsize, ctx=multiprocessing.get_context())

        self.namespace = utils.Namespace(remaining=total_sources,
                                         exception=False,
                                         force_stop=False)
        self.exception_queue: Queue[PipelineException] = Queue(
            ctx=multiprocessing.get_context())
Example #19
 def __init__(self, name, machine, ready_func, workers=1):
     self.id = name
     self.machine = machine
     self.ready_func = ready_func
     self.name = name
     self.readq = Queue(maxsize=-1, ctx=multiprocessing.get_context())
     self.num_workers = max(1, workers)
     self.workers = []
Example #20
    def __init__(self, functions):
        self.outbound = Queue()
        self.inbound = Queue()

        #MAKE THREADS
        self.threads = []
        for t, f in enumerate(functions):
            thread = worker(
                "worker " + unicode(t),
                f,
                self.inbound,
                self.outbound,
            )
            self.threads.append(thread)
Example #21
 def setUp(self):
     if six.PY2:
         self.filter_queue = Queue()
         self.message_queue = Queue()
     else:
         self.ctx = get_context()
         self.filter_queue = Queue(ctx=self.ctx)
         self.message_queue = Queue(ctx=self.ctx)
     self.client_cfg = {
         "ip_address": "127.0.0.1",
         "filename": "/dev/null",
         "verbose": 0,
         "port": "1234"
     }
     self.stop_event = Event()
     self.handler = DLTMessageHandler(self.filter_queue, self.message_queue,
                                      self.stop_event, self.client_cfg)
Example #22
    def __init__(self, target, args, filename, cpus=cpu_count()):
        # macOS starts process with fork by default: https://zhuanlan.zhihu.com/p/144771768
        if platform.system() == "Darwin":
            set_start_method("fork")

        workerq = Queue()
        writerq = Queue()

        for a in args:
            workerq.put(a)

        cpus = min(cpus, len(args))
        for i in range(cpus):
            workerq.put(Poison())

        self.worker = Jobs(work, args=[(workerq, writerq, target)] * cpus)
        self.writer = Process(target=write,
                              args=(workerq, writerq, filename, cpus))
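Neither `work` nor `Poison` is shown. Because one `Poison()` is queued per worker, the consumer is presumably a sentinel-terminated loop. A rough sketch of that pattern (forwarding results and the sentinel to `writerq` is an assumption):

class Poison(object):
    # Sentinel object telling a worker that no more input will arrive.
    pass

def work(workerq, writerq, target):
    # Consume arguments until the Poison sentinel arrives, forwarding each
    # result to the writer; pass the sentinel on so the writer can tell when
    # every worker has finished.
    while True:
        item = workerq.get()
        if isinstance(item, Poison):
            writerq.put(item)
            break
        writerq.put(target(item))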
Example #23
    def __enter__(self):
        self.smtp_process_queue = Queue()
        self.smtp_process = Process(
            target=get_otp_mail, args=(self.smtp_process_queue, self.timeout))
        self.smtp_process.start()
        self.port = self.smtp_process_queue.get(True, 5)
        self._do_lintop_config()

        return self
Example #24
    def create_endpoint(self, name: str) -> Endpoint:
        if name in self._endpoints:
            raise ValueError("An endpoint with that name does already exist")

        receiving_queue: Queue = Queue(0, ctx=self.ctx)

        endpoint = Endpoint(name, self._incoming_queue, receiving_queue)
        self._endpoints[name] = endpoint
        return endpoint
Example #25
 def start(self,url):                
     # raise BadFormatError
     items = []
     # The part below can be called as often as you want        
     results = Queue()
     crawler = CrawlerWorker(LinkedinSpider(url), results)
     crawler.start()
     for item in results.get():
         items.append(dict(item))
     return items
Example #26
 def __init__(
     self,
     ctx: Union[ModuleType,
                multiprocessing.context.BaseContext] = multiprocessing
 ) -> None:
     self.ctx = ctx
     self._queues: List[multiprocessing.Queue] = []
     self._endpoints: Dict[str, Endpoint] = {}
     self._incoming_queue: Queue = Queue(0, ctx=self.ctx)
     self._running = False
     self._executor = ThreadPoolExecutor()
Example #27
    def __init__(self, instructions, tables):
        self.instructions = instructions
        self.tables = tables

        self._input_interface = None
        self._output_interface = Queue()
        self._instructions = {}
        self._is_setup = False
        self._is_start = False

        self._setup()
Example #28
def main():
    sfile = settings.BIG_FILE
    fsize = os.path.getsize(sfile)
    with open(sfile, "r") as fh:
        chunks = size_chunks(fh, fsize, num_chunks=settings.BIGFILE_MP_CHUNKS)
    
    # Debug
    #for c in chunks:
        #print(c)
        
    q = Queue()
    pattern = re.compile(settings.TARGET_USERNAME)
    
    # consumer
    #con = multiprocessing.Process(target=opener, args=(cat(grep(pattern, writer())),))
    #con.daemon = True
    #con.start()
    
    # producer
    producers = []
    file_handles = []
    for chunk in chunks:    
        fh = open(sfile, "r")
        file_handles.append(fh)
        o = opener(cat(chunk, grep(pattern, writer(q))))
        t = multiprocessing.Process(target=sender, args=(o,))
        t.daemon = True
        producers.append(t)
        
    for p in producers:
        p.start()
        
    
    for p in producers:
        p.join()
        
    #con.join()
    q.put(None) # sentinel
    
    for f in file_handles:
        f.close()
        
    recsmatch = 0 
    print("Before queue comp")
    while True:
        x = q.get()
        if x is None:
            break
        recsmatch += 1
    print("After queue comp")
        
    
    print("recsmatch={r} chunks={c}".format(r=recsmatch,
                                        c=settings.BIGFILE_MP_CHUNKS))
Example #29
    def __init__(self, instruction, tables):
        self._instruction = instruction
        self._tables = tables
        self.input_interface = Queue()
        self.output_interfaces = {}

        self._instruction_pipelines = []
        self._is_atomic_enabled = False
        self._is_sequential_enabled = False
        self._is_concurrent_enabled = False

        self._setup()
Example #30
def multi_sim(CORES=2, T=100):
    
    results = []
    ques = [Queue() for i in range(CORES)]
    args = [(100, 70, 70, 70, int(T/CORES), True, ques[i], i) for i in range(CORES)]
    jobs = [Process(target=simulate, args=(a)) for a in args]
    for j in jobs: j.start()
    for q in ques: results.append(q.get())
    for j in jobs: j.join()
    S = np.hstack(results)

    return S
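`simulate` is not listed; each argument tuple ends with a per-process queue and an index, and `multi_sim` performs exactly one `get()` per queue, so every worker must put a single array. A compatible skeleton under those assumptions (the parameter names and the simulation body are placeholders):

import numpy as np

def simulate(n_agents, a, b, c, steps, verbose, queue, seed):
    # Placeholder dynamics; the real model is not part of this example.
    rng = np.random.RandomState(seed)
    S = rng.normal(size=(n_agents, steps))
    if verbose:
        print("worker %d finished %d steps" % (seed, steps))
    queue.put(S)  # exactly one put(), matching the one get() in multi_sim()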