Example #1
    def __init__(self, cmd=None, waitForLine='Waitress serving Webware'):
        super().__init__()
        self.cmd = cmd or ['webware', 'serve']
        self.waitForLine = waitForLine
        self.outputQueue = SimpleQueue()
        self.pollQueue = Queue()
        self.stopQueue = SimpleQueue()
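The rest of this class is not shown. Purely as a hypothetical sketch (not the original project's code), a run() method could tie the three queues together roughly like this, assuming the command is launched with subprocess.Popen and stopQueue is used to request shutdown:

    def run(self):
        # hypothetical sketch: start the serve command, forward each output
        # line to outputQueue, and report readiness once waitForLine appears
        proc = subprocess.Popen(self.cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT, text=True)
        for line in proc.stdout:
            self.outputQueue.put(line)
            if self.waitForLine in line:
                self.pollQueue.put(True)  # the server is ready to accept requests
            if not self.stopQueue.empty():  # shutdown requested by the parent
                break
        proc.terminate()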
Example #2
    def run(self, tasks, render, update, render_args=(), render_kwargs={}, update_args=(), update_kwargs={}):

        # establish IPC queues for handing out tasks and collecting results
        task_queue = SimpleQueue()
        result_queue = SimpleQueue()

        # start process to generate image samples
        producer = Process(target=self._producer, args=(tasks, task_queue))
        producer.start()

        # start worker processes
        workers = []
        for pid in range(self._processes):
            p = Process(target=self._worker, args=(render, render_args, render_kwargs, task_queue, result_queue))
            p.start()
            workers.append(p)

        # consume results
        for _ in tasks:
            result = result_queue.get()
            update(result, *update_args, **update_kwargs)

        # shutdown workers
        for _ in workers:
            task_queue.put(None)
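The _producer and _worker methods called above are not shown. A minimal sketch of what they might look like, assuming render(task, *render_args, **render_kwargs) produces one result per task and None is the shutdown sentinel implied by the final loop:

    def _producer(self, tasks, task_queue):
        # hypothetical sketch: push every task onto the shared task queue
        for task in tasks:
            task_queue.put(task)

    def _worker(self, render, render_args, render_kwargs, task_queue, result_queue):
        # hypothetical sketch: render tasks until the None sentinel arrives
        for task in iter(task_queue.get, None):
            result_queue.put(render(task, *render_args, **render_kwargs))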
Example #3
def test_fastq_parsing():

    inq = SimpleQueue()
    outq = SimpleQueue()

    rdpp = ReadDeduplicationParserProcess(inq=inq, 
                                          outq=outq, 
                                          save_hashed_dict_path=test_json_path)

    rdpp.start()
    
    inq.put(test_data)
    inq.put('END')
    _ = outq.get()

    rdpp.join()
    rdpp.terminate()

    with open(test_json_path) as r:
        result = ujson.load(r)


    hash_function = functools.partial(xxhash.xxh64_intdigest, seed=42)
    result_expected = {str(hash_function(r1.name + r2.name)): hash_function(r1.sequence + r2.sequence) for r1, r2 in test_data}


    assert len(result) == len(test_data)
    assert result == result_expected
    assert len({x for x in result.values()}) == 2 
Example #4
def main():
    set_start_method("spawn")

    # Transmit queue
    q1 = SimpleQueue()
    q2 = SimpleQueue()

    # number of packets to send
    n = 20

    # delay time
    sleeptime = 1

    # max wait for ACK
    maxwait = 4

    # spawn receiver
    p = Process(target=machineRX, args=(q1, q2, n, sleeptime))
    p.start()

    # spawn transmitter
    machineTX(q1, q2, n, sleeptime, maxwait)

    # wait till end
    p.join()
Example #5
def test_fastq_removal():

    inq = SimpleQueue()
    outq = SimpleQueue()

    with open(test_duplicates_path) as r:
        duplicates = ujson.load(r)
        duplicates_set = set(duplicates)


    rdrp = ReadDuplicateRemovalProcess(inq=inq, 
                                       outq=outq, 
                                       duplicated_ids=duplicates_set,
                                       )

    rdrp.start()
    
    inq.put(test_data)
    inq.put('END')
    result = outq.get()

    rdrp.join()
    rdrp.terminate()


    # Correct number of duplicates removed
    assert len(result)  == len(test_data_dedup)
    
    test_not_duplicated = [(r1.name, r2.name) for r1, r2 in result]
    expected_not_duplicated = [(r1.name, r2.name) for r1, r2 in test_data_dedup]

    # Correct duplicate names removed
    assert test_not_duplicated == expected_not_duplicated
Example #6
    async def pump(self, input: Iterable[Any], output: Callable[[Any], None]):
        """
        The ``ProcessSection`` pump method works similarly to the threaded version.
        However, since communication between processes is not as simple as it is
        between threads, which can share memory directly, there are some
        restrictions to be aware of.

        * Data that is to be sent to the input or transmitted on the output must be `pickleable
          <https://docs.python.org/3/library/pickle.html#what-can-be-pickled-and-unpickled>`_.
        * Since ``ProcessSection`` uses unbounded queues to transfer data behind the scenes, they
          are unable to provide or receive backpressure.
        """
        if input:
            input_queue = SimpleQueue()
        else:
            input_queue = None
        output_queue = SimpleQueue()
        process = Process(target=self._process_run_target,
                          args=(input_queue, output_queue))

        async def sender():
            async for item in input:
                await trio.to_thread.run_sync(input_queue.put, (item, ))
            await trio.to_thread.run_sync(input_queue.put, ())

        async with trio.open_nursery() as nursery:
            if input:
                nursery.start_soon(sender)
            process.start()
            while True:
                wrapped_item = await trio.to_thread.run_sync(output_queue.get)
                if wrapped_item == ():
                    break
                await output(wrapped_item[0])
            nursery.cancel_scope.cancel()
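The _process_run_target method is not shown. Judging from the pump loop above, items cross the process boundary wrapped in 1-tuples and the empty tuple () acts as the end-of-stream sentinel. A hypothetical sketch of the child-process side of that convention, assuming the section has an upstream input (transform() is a placeholder, not part of the library):

    def _process_run_target(self, input_queue, output_queue):
        # hypothetical sketch: unwrap 1-tuples from the input queue, stop at
        # the () sentinel, and wrap each produced item the same way so the
        # parent's pump loop can unwrap it
        for wrapped in iter(input_queue.get, ()):
            output_queue.put((transform(wrapped[0]),))  # transform() is a placeholder
        output_queue.put(())  # signal end of stream to the parent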
Example #7
    def modbus_start(self, ip, port):
        queue_receive = SimpleQueue()
        queue_send = SimpleQueue()
        Process(target=server.run_callback_server,
                args=(ip, port, queue_receive, queue_send)).start()

        while True:
            device, value = queue_receive.get()
            print("Registrador: %s -> Valor: %s" % (device, value))
            print(device, value)
            regType = device[0:2]
            regNum = device[2]

            # Booleans sent to the client (python -> labview)
            if (regType == "di"):
                queue_send.put([1, 0, 0, 1, 1, 0, 0, 1])

            # Booleans received from the client (labview -> python)
            if (regType == "co"):
                if (value[0] == 1):
                    self.measure.emit()

            # Integers sent to the client (python -> labview)
            if (regType == "ir"):
                self.inputReg.emit(regNum)
                # small delay to make sure the variable has been updated
                sleep(0.1)
                queue_send.put([self.inputRegData])

            # Integer received from the client (labview -> python)
            if (regType == "hr"):
                self.holdReg.emit(regNum, value[0])
Example #8
File: loader.py Project: yangwangx/GulpIO
    def __init__(self, loader):
        self.dataset = loader.dataset
        self.collate_fn = loader.collate_fn
        self.batch_sampler = loader.batch_sampler
        self.num_workers = loader.num_workers
        self.done_event = threading.Event()

        self.sample_iter = iter(self.batch_sampler)

        if self.num_workers > 0:
            self.index_queue = SimpleQueue()
            self.data_queue = SimpleQueue()
            self.batches_outstanding = 0
            self.shutdown = False
            self.send_idx = 0
            self.rcvd_idx = 0
            self.reorder_dict = {}

            self.workers = [
                Process(target=_worker_loop,
                        args=(self.dataset, self.index_queue, self.data_queue,
                              self.collate_fn))
                for _ in range(self.num_workers)
            ]

            for w in self.workers:
                w.daemon = True  # ensure that the worker exits on process exit
                w.start()

            # prime the prefetch loop
            for _ in range(2 * self.num_workers):
                self._put_indices()
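The _worker_loop target is defined elsewhere in the project. A plausible sketch of it, following the (index, batch) bookkeeping visible above (send_idx, rcvd_idx, reorder_dict) and using None as the shutdown sentinel:

def _worker_loop(dataset, index_queue, data_queue, collate_fn):
    # hypothetical sketch: fetch (idx, batch_indices) pairs until None arrives,
    # collate the corresponding samples and send them back tagged with idx
    while True:
        job = index_queue.get()
        if job is None:
            break
        idx, batch_indices = job
        samples = collate_fn([dataset[i] for i in batch_indices])
        data_queue.put((idx, samples))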
Example #9
File: Mokrwaze.py Project: Krolov18/Gloses
def genere_grille(n: int, corpus: list):
    corpus = list(filter(lambda x: len(x) == n, corpus))
    print(len(corpus))
    # Create the worker processes
    tasks = SimpleQueue()
    consumers = list()
    for i in range(1, n):
        data = list(
            itertools.chain(*[[
                Grid(shape=(n, n), values=z, jonction=(i, i))
                for z in itertools.combinations(v, r=2)
            ] for _, v in itertools.groupby(key=lambda x: x[i],
                                            iterable=corpus)]))
        consumers.append(
            Consumer(task_queue=SimpleQueue(), data=data, i=i, n=n))

    # Fill the first process's queue
    for x in itertools.chain(
            *[[z for z in itertools.combinations(v, r=2)]
              for _, v in itertools.groupby(corpus, lambda x: x[0])]):
        consumers[0].reception.put(x)
        print(x)
        # Task(Grid(shape=(n, n), values=x, jonction=(0, 0)), seuil=1000, db="Mokrwaze.db")
    # Consumer.consumers[1].reception.put(None)
    print('done')
Example #10
    def __init__(self, key, task_group, randomize):
        self.key = key
        self.gen_worker = task_group['gen_worker']
        self.task_ids = task_group['task_ids']
        self.is_parallel = task_group['is_parallel']
        if self.is_parallel:
            self.randomize = randomize
            if self.randomize:
                random.shuffle(self.task_ids)
        else:
            self.randomize = False
        self.result_queue = SimpleQueue()
        self.task_queue = SimpleQueue()

        # Don't expose queues file descriptors over Popen to, say, tarantool
        # running tests.
        set_fd_cloexec(self.result_queue._reader.fileno())
        set_fd_cloexec(self.result_queue._writer.fileno())
        set_fd_cloexec(self.task_queue._reader.fileno())
        set_fd_cloexec(self.task_queue._writer.fileno())

        for task_id in self.task_ids:
            self.task_queue.put(task_id)
        self.worker_ids = set()
        self.done = False
        self.done_task_ids = set()
Example #11
def main() -> None:
    if len(sys.argv) < 2:  # <1>
        workers = cpu_count()
    else:
        workers = int(sys.argv[1])

    print(f'Checking {len(NUMBERS)} numbers with {workers} processes:')

    jobs: JobQueue = SimpleQueue()  # <2>
    results: ResultQueue = SimpleQueue()
    t0 = perf_counter()

    for n in NUMBERS:  # <3>
        jobs.put(n)

    for _ in range(workers):
        proc = Process(target=worker, args=(jobs, results))  # <4>
        proc.start()  # <5>
        jobs.put(0)  # <6>

    while True:
        n, prime, elapsed = results.get()  # <7>
        label = 'P' if prime else ' '
        print(f'{n:16}  {label} {elapsed:9.6f}s')  # <8>
        if jobs.empty():  # <9>
            break

    elapsed = perf_counter() - t0
    print(f'Total time: {elapsed:.2f}s')
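The worker function, the JobQueue/ResultQueue aliases and NUMBERS are defined earlier in the same script and are not shown here. A minimal sketch of a worker compatible with the loop above, where 0 is the poison pill put by main() and is_prime() is assumed to exist elsewhere:

def worker(jobs, results):
    # hypothetical sketch: take numbers from the jobs queue until the 0
    # sentinel, time the primality check and report (n, prime, elapsed)
    while n := jobs.get():
        t0 = perf_counter()
        prime = is_prime(n)  # is_prime() is assumed to be defined elsewhere
        results.put((n, prime, perf_counter() - t0))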
Example #12
File: Grid.py Project: rsarbaev/PuzzleLib
def generateGridInfo(size, devices):
	devices = range(size) if devices is None else devices

	queues = [(SimpleQueue(), SimpleQueue()) for _ in range(size - 1)]
	parent = ParentNode(0, size, devices[0], queues)

	nodes = [ChildNode(index + 1, size, devices[index + 1], queues[index]) for index in range(size - 1)]
	return [parent] + nodes
Example #13
    def _fit(self, X, y, blocks):
        """Fit base clustering estimators on X."""
        self.blocks_ = blocks

        processes = []
        # Here the blocks will be passed to subprocesses
        data_queue = SimpleQueue()
        # Here the results will be passed back
        result_queue = SimpleQueue()
        for x in range(self.n_jobs):
            processes.append(
                mp.Process(target=_parallel_fit,
                           args=(self.fit_, self.partial_fit_,
                                 self.base_estimator, self.verbose, data_queue,
                                 result_queue)))
            processes[-1].start()

        # First n_jobs blocks are sent into the queue without waiting for the
        # results. This variable is a counter that takes care of this.
        presend = 0
        blocks_computed = 0
        blocks_all = len(np.unique(blocks))

        for block in self._blocks(X, y, blocks):
            if presend >= self.n_jobs:
                b, clusterer = result_queue.get()
                blocks_computed += 1
                if clusterer:
                    self.clusterers_[b] = clusterer
            else:
                presend += 1
            if self.partial_fit_:
                if block[0] in self.clusterers_:
                    data_queue.put(('middle', block, self.clusterers_[block[0]]))
                    continue

            data_queue.put(('middle', block, None))

        # Get the last results and tell the subprocesses to finish
        for x in range(self.n_jobs):
            if blocks_computed < blocks_all:
                print("%s blocks computed out of %s" %
                      (blocks_computed, blocks_all))
                b, clusterer = result_queue.get()
                blocks_computed += 1
                if clusterer:
                    self.clusterers_[b] = clusterer

        data_queue.put(('end', None, None))

        time.sleep(1)

        return self
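The _parallel_fit target is not shown. A heavily simplified, hypothetical sketch of the protocol implied above, assuming each queued block is a (block_id, X_block, y_block) tuple and that the ('end', None, None) message stops a worker; sklearn.base.clone is used here only for illustration:

from sklearn.base import clone  # used only in this illustration

def _parallel_fit(fit, partial_fit, base_estimator, verbose, data_queue, result_queue):
    # hypothetical sketch: fit one clusterer per block, return (block_id, clusterer)
    while True:
        tag, block, warm_clusterer = data_queue.get()
        if tag == 'end':
            data_queue.put((tag, block, warm_clusterer))  # let the other workers stop too
            break
        block_id, X_block, y_block = block
        clusterer = warm_clusterer if warm_clusterer is not None else clone(base_estimator)
        clusterer.fit(X_block, y_block)
        result_queue.put((block_id, clusterer))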
Example #14
    def __init__(self, rate=None, debug=False):
        super().__init__(rate=rate)
        self._rate = rate
        self._send_to = None
        self._recv_from = None
        self.debug = debug
        #############################################################################
        # TODO: ADD YOUR NECESSARY ATTRIBUTES HERE
        #############################################################################
        # address of the peer; None if this side is the server
        self.dst_addr = None

        self.send_queue = SimpleQueue()  # bytes waiting to be sent
        self.recv_queue = SimpleQueue()  # received bytes not yet parsed
        self.transmit_queue = SimpleQueue()  # data to transmit: {'data': bytes, 'is_end': bool}
        self.waiting_for_ack = []  # sent datagrams waiting for acknowledgement
        self.timers = {}  # timers for datagrams awaiting acknowledgement
        self.recv_datagram_buf = {}  # datagrams that arrived out of order
        self.recv_data_buffer = [b'']  # buffer of received data
        self.recv_data_lock = Lock()
        self.send_waiting_lock = Lock()
        self.status_lock = Lock()

        # connection state
        self.seq = -1
        self.seqack = -1
        self.seq_bias = 0
        self.duplicate_cnt = 0

        # window state
        self.win_idx, self.win_size = 0, 5

        # timeout settings
        self.SRTT = 3
        self.DevRTT = 0
        self.RTO = 3

        # established addresses and the ports returned by accept()
        self.conns = {}
        self.conn = None

        # worker threads
        self.status = Status.Active
        self.send_thread = Thread(target=self.send_threading)
        self.recv_thread = Thread(target=self.recv_threading)
        self.transmit_thread = Thread(target=self.transmit_threading)
        self.process_thread = Thread(target=self.process_threading)
        self.transmit_thread.start()
        self.process_thread.start()
        self.recv_thread.start()
        self.send_thread.start()
Example #15
def compile_model(model_file, output_file=None):
    """  Compile network in separate thread

    To avoid initialising Theano in main thread, compilation must be done in a
    separate process. The reason for avoidance is that forking a process after
    CUDA context is initialised is not supported, so, if this is a multiprocess
    run, processes must be created before importing theano.sandbox.cuda.

    Where the network is already compiled, a temporary copy is created.

    :param model_file: File to read network from
    :param output_file: File to output to.  If None, generate a filename

    :returns: A filename containing a compiled network.
    """
    queue = SimpleQueue()
    p = Process(target=_compile_model, args=(queue, model_file, output_file))
    p.start()
    p.join()
    if p.exitcode != 0:
        output_file = None
        raise ValueError("Model file {} was neither a network nor compiled network".format(model_file))
    else:
        output_file = queue.get()

    return output_file
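The _compile_model target is not shown. A minimal sketch under the assumption that it does the Theano-dependent work in the child process and hands the resulting filename back over the queue (compile_network() is a placeholder, not the project's real helper):

def _compile_model(queue, model_file, output_file):
    # hypothetical sketch: compile (or copy) the network in the child process,
    # where importing Theano is safe, and report the output path to the parent
    compiled_file = compile_network(model_file, output_file)  # placeholder helper
    queue.put(compiled_file)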
Example #16
    def get_novel_dict(self, chapter_url_list: list) -> dict:
        result = self.downloader.get_result(chapter_url_list)
        result.show_time_cost()
        result.show_urls_status()
        print(" 重试失败章节 ".center(shutil.get_terminal_size().columns - 7, '*'))
        result.retry_failed_urls()
        result.show_urls_status()

        print(" 分离章节内容 ".center(shutil.get_terminal_size().columns - 7, '*'))
        process_number = self.downloader.config.get_config(
            "multi", "process_number")
        process_number = int(process_number //
                             1.5) if process_number > 2 else process_number
        queue = SimpleQueue()
        for i in range(process_number):
            Process(target=self.fill_novel_dict,
                    args=(chapter_url_list[i::process_number],
                          result.get_urls_detail_dict(), queue)).start()
        for i in tqdm(range(len(chapter_url_list)),
                      total=len(chapter_url_list),
                      desc="分离章节内容",
                      unit="章节",
                      postfix={"process": process_number}):
            queue.get()

        return result.get_urls_detail_dict()
Example #17
File: helpers.py Project: paulgzlz/RD-MCL
 def __init__(self, db_file="sqlite_db.sqlite", lock_wait_time=120):
     self.db_file = db_file
     self.connection = sqlite3.connect(self.db_file)
     self.broker_cursor = self.connection.cursor()
     self.broker_queue = SimpleQueue()
     self.broker = None
     self.lock_wait_time = lock_wait_time
Example #18
    def __init__(self, max_workers=None):
        """Initializes a new ProcessPoolExecutor instance.

        Args:
            max_workers: The maximum number of processes that can be used to
                execute the given calls. If None or not given then as many
                worker processes will be created as the machine has processors.
        """
        _check_system_limits()
        if max_workers is None:
            self._max_workers = os.cpu_count() or 1
        else:
            if max_workers <= 0:
                raise ValueError('max_workers must be greater than 0')
            self._max_workers = max_workers
        self._call_queue = multiprocessing.Queue(self._max_workers +
            EXTRA_QUEUED_CALLS)
        self._call_queue._ignore_epipe = True
        self._result_queue = SimpleQueue()
        self._work_ids = queue.Queue()
        self._queue_management_thread = None
        self._processes = {}
        self._shutdown_thread = False
        self._shutdown_lock = threading.Lock()
        self._broken = False
        self._queue_count = 0
        self._pending_work_items = {}
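This is the constructor of the standard-library ProcessPoolExecutor: _call_queue carries work items to the worker processes, while the SimpleQueue in _result_queue carries results back to the queue-management thread. User code only touches the public interface, for example:

from concurrent.futures import ProcessPoolExecutor

def square(x):
    return x * x

if __name__ == '__main__':
    # the executor manages the call/result queues shown above internally
    with ProcessPoolExecutor(max_workers=4) as executor:
        print(list(executor.map(square, range(10))))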
Example #19
def main ():
    #global recognizer
    #global microphone
    #global connection
    
    #recognizer = sr.Recognizer()
    #microphone = sr.Microphone (device_index=0)
    connection = SimpleQueue ()
    
    #speech_parser   = Speech_parser ()
    #speech_parser.set_connection (connection)
    
    #speech_parser   = Process (target=run_audio_recognition, args=(connection,))
    speech_parser = 0
    #freeze_support ()
    #speech_parser.start ()
    
    robot           = Robot ("192.168.137.150", 9555)
    #robot           = Robot ("10.0.0.102", 9555)
    words_processor = Words_processor ()
    dialogue_system = Dialogue_system  ()
    
    robot_state = Robot_state (speech_parser, robot, words_processor, connection, dialogue_system)
    
    root = Tk ()
    root.geometry ("730x580")
    
    GUI  = Main_window (root, robot_state)
    
    root.mainloop ()
Example #20
    def __handle_client_connection(self, client_sock):

        try:
            msg = client_sock.recv(op_code_size).decode()

            logging.info('Message received from connection {}. Msg: {}'.format(
                client_sock.getpeername(), msg))

            if (msg == done_op_code):
                return "done"

            if (msg == new_sc_code):
                self._last_slave_tasked = (self._last_slave_tasked +
                                           1) % number_sub_slaves
                elements_of_procces_to_task = self._sub_dic[
                    self._last_slave_tasked]

                msg = client_sock.recv(new_sc_max_lenght).decode().split(",")
                ip = msg[0]
                port = int(msg[1])
                interval = int(msg[2])
                path = msg[3]
                logging.info("asked to backup {}".format(ip + "," + str(port) +
                                                         "," + path))

                task_list = elements_of_procces_to_task[0]
                task_to_do = (ip, port, path, interval)
                task_list.append((datetime.now(), task_to_do))

                list_of_ip_port = elements_of_procces_to_task[1]
                list_of_ip_port.append((ip, path))

                lock_of_procces = elements_of_procces_to_task[2]
                self._lock_dic[(ip, path)] = lock_of_procces

            if (msg == get_bu_code):
                msg = client_sock.recv(new_sc_max_lenght).decode().split(",")
                ip = msg[0]
                path = msg[1]
                queue = SimpleQueue()
                queue.put(client_sock)
                Process(target=get_backup,
                        args=(ip, path, self._lock_dic[(ip, path)],
                              queue)).start()

            if (msg == stop_bu_code):
                msg = client_sock.recv(new_sc_max_lenght).decode().split(",")
                ip = msg[0]
                path = msg[1]

                procces_number = self._get_procces_for_task(ip, path)
                if (procces_number or procces_number == 0):
                    elements_of_procces_to_task = self._sub_dic[procces_number]
                    task_list = elements_of_procces_to_task[0]
                    task_to_do = (ip, 0, path, -1)
                    task_list.append((datetime.now(), task_to_do))

        except OSError as e:
            logging.info("Error while reading socket {}, {}".format(
                client_sock, e))
Example #21
    def __CheckResults_PhySim(self, HTML, CONST, testcase_id):
        mySSH = sshconnection.SSHConnection()
        mySSH.open(self.eNBIpAddr, self.eNBUserName, self.eNBPassWord)
        #retrieve run log file and store it locally
        mySSH.copyin(self.eNBIpAddr, self.eNBUserName, self.eNBPassWord,
                     self.__workSpacePath + self.__runLogFile, '.')
        mySSH.close()
        #parse results looking for Encoding and Decoding mean values
        self.__runResults = []
        with open(self.__runLogFile) as f:
            for line in f:
                if 'mean' in line:
                    self.__runResults.append(line)
        #the values are appended for each mean value (2), so we take these 2 values from the list
        info = self.__runResults[0] + self.__runResults[1]

        #once parsed move the local logfile to its folder for tidiness
        os.system('mv ' + self.__runLogFile + ' ' + self.__runLogPath + '/.')

        #updating the HTML with results
        html_cell = '<pre style="background-color:white">' + info + '</pre>'
        html_queue = SimpleQueue()
        html_queue.put(html_cell)
        HTML.CreateHtmlTestRowQueue(self.runargs, 'OK', 1, html_queue)
        return HTML
Example #22
    def call(*args, **kwargs):
        terminate = Event()
        queue = SimpleQueue()

        process = IProcess(target=Main,
                           args=(terminate, queue, func, args, kwargs),
                           daemon=daemon)
        process.start()

        # Get results/errors
        result, exception = queue.get()

        process.join(timeout=terminate)
        while process.is_alive():
            process.terminate()

        # catch errors raised by the callback
        try:
            if callback is not None:
                callback(*cb_args, **cb_kwargs)
        except:
            if exception:
                result += (_format_tb(*sys.exc_info()),)
            else:
                result, exception = (_format_tb(*sys.exc_info()),), True

        if exception:
            _raise_tb_stack(result)
        else:
            return result
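The Main target and the closed-over names (func, callback, cb_args, cb_kwargs, daemon) come from the enclosing decorator, which is not shown. A hypothetical sketch of a Main function compatible with the (result, exception) tuple unpacked above:

def Main(terminate, queue, func, args, kwargs):
    # hypothetical sketch: run the wrapped function in the child process and
    # report either ((result,), False) or a formatted traceback with True;
    # the terminate event is accepted but unused in this sketch
    try:
        queue.put(((func(*args, **kwargs),), False))
    except Exception:
        queue.put(((_format_tb(*sys.exc_info()),), True))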
Example #23
    def __init__(self, init_objs: Iterable[T] = []):
        self._log = logging.getLogger('ThreadsafeObjectPool')
        log = self._log.getChild('__init__')
        log.info('Creating ThreadsafeObjectPool.')
        self._pool = SimpleQueue()
        for obj in init_objs:
            self.release(obj)
Example #24
def hydrate(cfs):
    jobs = []
    squeue = SimpleQueue()
    data_dirs = ['2020-01', '2020-02', '2020-03', '2020-04']

    # create folder here to avoid data racing
    if not os.path.isdir('hydrated'):
        os.makedirs('hydrated')

    for data_dir in data_dirs:
        full = os.path.join('hydrated', data_dir)
        print(full)
        if not os.path.isdir(full):
            os.makedirs(full)

    fc_process = Process(target=file_creator, args=(
        squeue,
        len(cfs),
    ))
    jobs.append(fc_process)
    fc_process.daemon = True
    fc_process.start()

    for cf in cfs:
        # unpack keys
        account = cf['account']
        p = Process(target=hydrated_cycle, args=(
            account,
            squeue,
        ))
        jobs.append(p)
        p.daemon = True
        p.start()

    [j.join() for j in jobs]
Example #25
File: _export.py Project: haadifzhari/lab2
def export_table(host, port, auth_key, db, table, directory, fields, delimiter,
                 format, error_queue, progress_info, sindex_counter,
                 exit_event):
    writer = None

    try:
        # This will open at least one connection for each rdb_call_wrapper, which is
        # a little wasteful, but shouldn't be a big performance hit
        conn_fn = lambda: r.connect(host, port, auth_key=auth_key)
        table_info = rdb_call_wrapper(conn_fn, "info", write_table_metadata,
                                      db, table, directory)
        sindex_counter.value += len(table_info["indexes"])

        task_queue = SimpleQueue()
        writer = launch_writer(format, directory, db, table, fields, delimiter,
                               task_queue, error_queue)
        writer.start()

        rdb_call_wrapper(conn_fn, "table scan", read_table_into_queue, db,
                         table, table_info["primary_key"], task_queue,
                         progress_info, exit_event)
    except (r.ReqlError, r.ReqlDriverError) as ex:
        error_queue.put((RuntimeError, RuntimeError(ex.message),
                         traceback.extract_tb(sys.exc_info()[2])))
    except:
        ex_type, ex_class, tb = sys.exc_info()
        error_queue.put((ex_type, ex_class, traceback.extract_tb(tb)))
    finally:
        if writer is not None and writer.is_alive():
            task_queue.put(StopIteration())
            writer.join()
Example #26
    def __init__(self, parent=None, *args, **kwargs):
        """
        Initialise audio processing.
        In debug mode, the audio data is saved to file.
        """
        self.sdk = parent.sdk
        self.sample_size = parent.sample_size
        self.block_size = parent.block_size
        self.sample_format = parent.sample_format
        self.process_input_fn = parent.process_input_fn
        self.sample_rate = float(parent.sdk.sample_rate)

        self.block_period = self.block_size / self.sample_rate or 0.1
        self.wav_filename = parent.wav_filename or self.DEBUG_AUDIO_FILENAME
        self.input_queue = SimpleQueue()
        super(AudioProcessingThread, self).__init__(*args, **kwargs)

        if self.sdk.debug:
            import soundfile as sf
            self.wav_file = sf.SoundFile(self.wav_filename,
                                         mode='w',
                                         channels=1,
                                         samplerate=self.sdk.sample_rate)

        self.daemon = True
        self.start()
Example #27
def importData(simulator):
    testSim = simulator
    q = SimpleQueue()
    jobs = []
    PERIOD_SIZE = 50
    BATCH_SIZE = 100
    BATCH_COUNT = 1
    BATCH_OFFSET = 100
    dates = testSim.getAllDates()
    index = list(range(BATCH_COUNT))
    feed = []
    threads = 16

    running = False
    count = 0
    while 1:
        if count < threads:
            for i in random.sample(index, threads-count if len(index) >= threads-count else len(index)):
                p = Process(target=testSim.processTimePeriod, args=(q, PERIOD_SIZE, dates, BATCH_SIZE * (i + BATCH_OFFSET) + PERIOD_SIZE, BATCH_SIZE))
                jobs.append(p)
                p.start() 
                index.remove(i)
        count = 0
        for p in jobs:
            if not p.is_alive():
                p.terminate()
                jobs.remove(p)
            else:
                count += 1
        while not q.empty():
            print('Getting')
            feed.append(q.get())
        if count == 0 and len(index) == 0: 
            break
    return feed
Example #28
def feeder(input, tssv_library, indel_score, has_iupac, workers, chunksize,
           done_queue):
    """
    Start worker processes, feed them sequences from input and have them
    write their results to done_queue.
    """
    task_queue = SimpleQueue()
    processes = []
    for i in range(workers):
        process = Process(target=worker,
                          args=(tssv_library, indel_score, has_iupac,
                                task_queue, done_queue))
        process.daemon = True
        process.start()
        processes.append(process)
    while True:
        # Sending chunks of reads to the workers.
        task = tuple(itertools.islice(input, chunksize))
        if not task:
            break
        task_queue.put(task)
    for i in range(workers):
        task_queue.put(None)
    for process in processes:
        process.join()
    done_queue.put(None)
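The worker target started here pairs with Example #29 below. A plausible sketch of it, assuming it processes each chunk of reads with the project's process_sequence() and sends back a list of (seq, results) pairs, stopping at the None sentinel:

def worker(tssv_library, indel_score, has_iupac, task_queue, done_queue):
    # hypothetical sketch: process chunks of reads until the None sentinel and
    # send one list of (seq, results) pairs back per chunk
    for chunk in iter(task_queue.get, None):
        done_queue.put([
            (seq, process_sequence(tssv_library, indel_score, has_iupac, seq))
            for seq in chunk])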
Example #29
    def process_file(self):
        if self.workers == 1:
            for seq in self.dedup_reads():
                self.cache_results(
                    seq,
                    process_sequence(self.tssv_library, self.indel_score,
                                     self.has_iupac, seq))
        else:
            # Start worker processes.  The work is divided into tasks that
            # require about 1 million alignments each.
            done_queue = SimpleQueue()
            chunksize = 1000000 // (4 * len(self.tssv_library)) or 1
            thread = Thread(target=feeder,
                            args=(self.dedup_reads(), self.tssv_library,
                                  self.indel_score, self.has_iupac,
                                  self.workers, chunksize, done_queue))
            thread.daemon = True
            thread.start()

            # Process the results as they come in.
            # Below is speed-optimized to manage as many workers as possible.
            acquire_lock = self.lock.acquire
            release_lock = self.lock.release
            cache_results = self.cache_results
            for seq, results in itertools.chain.from_iterable(
                    iter(done_queue.get, None)):
                acquire_lock()
                cache_results(seq, results)
                release_lock()
            thread.join()

        # Count number of unique sequences per marker.
        for marker in self.tssv_library:
            self.counters[marker]["unique_seqs"] = len(self.sequences[marker])
Example #30
def redis_matching(crypto):
    # Data is enriched in logstash
    conf = Configuration()
    r = redis.StrictRedis(host=conf['redis']['host'], port=conf['redis']['port'], db=conf['redis']['db'])

    lock = Lock()
    match = SimpleQueue()
    if args.multiprocess > 0:
        n = min(args.multiprocess, cpu_count()-1)
        processes = list()
        for i in range(n):
            process = Process(target=redis_matching_process, args=(r, match, lock, crypto))
            process.start()
            processes.append(process)

        # Print match(es)
        print_process = Process(target=print_queue_process, args=([match]))
        print_process.start()
        for process in processes:
            process.join()
        print_process.terminate()
    else:
        redis_matching_process(r, match, lock, crypto)
        for item in iterator_result(match):
            print(item)
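The redis_matching_process, print_queue_process and iterator_result helpers are defined elsewhere. Hypothetical sketches of the two consumers of the match queue, consistent with how they are used above (the printer runs until the parent terminates it; the iterator drains whatever is already queued):

def print_queue_process(match):
    # hypothetical sketch: print every match as it arrives; the parent calls
    # terminate() on this process once all matching workers have joined
    while True:
        print(match.get())

def iterator_result(queue):
    # hypothetical sketch: yield the items currently in the queue, then stop
    while not queue.empty():
        yield queue.get()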