Code example #1
def make_segments(imzml_reader, ibd_path, ds_segments_bounds, segments_dir,
                  sort_memory):
    n_cpus = os.cpu_count()
    ds_size = sum(imzml_reader.mzLengths) * (
        np.dtype(imzml_reader.mzPrecision).itemsize + 4 + 4)
    # TODO: Tune chunk_size to ensure no OOMs are caused
    chunk_size_to_fit_in_memory = sort_memory // 2 // n_cpus
    chunk_size_to_use_all_cpus = ds_size * 1.1 // n_cpus
    chunk_size = min(chunk_size_to_fit_in_memory, chunk_size_to_use_all_cpus)
    chunk_ranges = plan_dataset_chunks(imzml_reader, max_size=chunk_size)
    chunks_n = len(chunk_ranges)
    logger.debug(f'Reading dataset in {chunks_n} chunks: {chunk_ranges}')

    segm_sizes = []
    with ProcessPoolExecutor(n_cpus) as ex:
        chunk_tasks = [(imzml_reader, ibd_path, chunk_i, start, end,
                        ds_segments_bounds, segments_dir)
                       for chunk_i, (start, end) in enumerate(chunk_ranges)]
        for chunk_segm_sizes in ex.map(parse_and_segment_chunk, chunk_tasks):
            segm_sizes.extend(chunk_segm_sizes)

    ds_segms_len = (pd.DataFrame(segm_sizes, columns=[
        'segm_i', 'segm_size'
    ]).groupby('segm_i').segm_size.sum().sort_index().values)

    return chunks_n, ds_segms_len
Code example #2
class MultiCommand:
    def __init__(
        self,
        commands: Sequence[Union[List[str], str]],
        config: Dict[str, Any],
        abort_reasons: Optional[List[str]] = None,
    ):
        self.processes: List[Future[Dict[str, Any]]] = []
        self.commands = commands
        self.config = config
        self.abort_reasons = abort_reasons
        self.process_pool_executor = ProcessPoolExecutor(
            max_workers=len(commands) if config["parallel"] else 1)

    def run(self) -> List[Dict[str, Any]]:
        for command in self.commands:
            if isinstance(command, str):
                command = [command]
            logger.debug(f'Spawning "{command}"')
            process = self.process_pool_executor.submit(
                retry_process, command, self.config, self.abort_reasons)
            self.processes += [process]

        # result() is blocking. The function will return when all processes are done
        return [process.result() for process in self.processes]
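
The comment in run() notes that Future.result() blocks and that results come back in submission order. When ordering does not matter, concurrent.futures.as_completed hands futures back as they finish; a minimal sketch, with slow_double as a hypothetical stand-in for retry_process:

from concurrent.futures import ProcessPoolExecutor, as_completed
import time

def slow_double(x):
    # Hypothetical stand-in for retry_process: uneven run times.
    time.sleep(x % 3)
    return x * 2

if __name__ == "__main__":
    with ProcessPoolExecutor(max_workers=4) as pool:
        futures = [pool.submit(slow_double, i) for i in range(8)]
        # result() in submission order would wait on the slowest early task;
        # as_completed() yields each future as soon as it is done.
        for fut in as_completed(futures):
            print(fut.result())
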
Code example #3
File: echoes.py Project: gollop/randovania
def generate_layout(permalink: Permalink,
                    status_update: Callable[[str], None],
                    validate_after_generation: bool,
                    timeout_during_generation: bool,
                    ) -> LayoutDescription:
    receiving_pipe, output_pipe = multiprocessing.Pipe(True)

    debug_level = debug.debug_level()
    if not permalink.spoiler:
        debug_level = 0

    def on_done(_):
        output_pipe.send(None)

    with ProcessPoolExecutor(max_workers=1) as executor:
        future = executor.submit(_generate_layout_worker, output_pipe, permalink, validate_after_generation,
                                 timeout_during_generation, debug_level)

        future.add_done_callback(on_done)

        while not future.done():
            message = receiving_pipe.recv()
            if message is not None:
                try:
                    status_update(message)
                except Exception:
                    receiving_pipe.send("close")
                    raise

        return future.result()
Code example #4
def make_word_clouds(out_dir, freqs: ChatTermFreq, max_words=1000,
                     width=1024, height=768, overwrite=False,
                     executor=None, num_workers=None):
    out_dir.mkdir(parents=True, exist_ok=True)
    executor = executor or ProcessPoolExecutor(num_workers)
    save_wordcloud = partial(save_wordcloud_image,
        max_words=max_words, width=width, height=height, skip_existing=not overwrite)

    for key, counter in {'chat': freqs.of, **freqs.by_user}.items():
        out_path = out_dir / '{}_{}x{}.png'.format(slugify(key), width, height)
        executor.submit(save_wordcloud, out_path, counter)

    # For each user, compute the probability of using a term and keep the first 1000
    term_prob_by_user = {}
    for user, counter in freqs.by_user.items():
        total = sum(counter.values())
        term_prob_by_user[user] = {term: c / total for term, c in counter.most_common(1000)}

    # Compute how much each user is more likely to use a term wrt other users in the chat
    users = list(term_prob_by_user)
    n = len(users)
    for i, user in enumerate(users):
        others = users[:i] + users[i + 1:]
        user_prob = term_prob_by_user[user]
        others_prob = {term: sum(term_prob_by_user[name].get(term, 0) for name in others)
                       for term in user_prob}
        ratio = {term: n*p / (n*p + others_prob[term])
                 for term, p in user_prob.items()}
        ratio = {term: weight for term, weight in ratio.items() if weight > 1e-5}
        filename = 'Peculiar_of_{}_{}x{}.png'.format(slugify(user), width, height)
        executor.submit(save_wordcloud, out_dir / filename, ratio)

    executor.shutdown()
Code example #5
    def get_candidates(self, table: Table) -> List[GeneratorResult]:
        """
        This method annotates each table column separately, by finding which are the column types and
        the relationships between the current column and the other.
        :param table: a list of search_keys, which must belong to the same table column
        :return: a list of GeneratorResult
        """
        col_search_keys = {}
        for cell in table.get_gt_cells():
            if cell.col_id not in col_search_keys:
                col_search_keys[cell.col_id] = []
            col_search_keys[cell.col_id].append(table.get_search_key(cell))

        col_search_keys = {col: chunk_list(search_keys, 500) for col, search_keys in col_search_keys.items()}

        if self._config.max_workers == 1:
            results = [self._get_candidates_for_column(search_keys)
                       for search_keys_list in col_search_keys.values()
                       for search_keys in search_keys_list]
        else:
            with ProcessPoolExecutor(self._config.max_workers) as pool:
                results = pool.map(self._get_candidates_for_column,
                                   [search_keys for search_keys_list in col_search_keys.values()
                                    for search_keys in search_keys_list])
        return functools.reduce(operator.iconcat, results, [])
Code example #6
def process_sources(blg):
    """Processes the source json file for the category
    Fetches the content for the url for each individual source and,
    extracts blocked and unblocked domains from it and,
    appends it the unified blocked and unblocked domains for the category.
    """
    blg.data_json[blg.j_key.sources] = sorted(
        blg.data_json[blg.j_key.sources], key=lambda x: x[blg.i_key.name].upper()
    )
    with ProcessPoolExecutor() as pool:
        (
            blocked_domains,
            unblocked_domains,
            unblock_rules,
            regex_rules,
            cname_list,
        ) = zip(
            *pool.map(
                worker_process_sources,
                blg.data_json[blg.j_key.sources],
                repeat(blg),
                chunksize=10,
            )
        )

    blocked_domains = chain.from_iterable(blocked_domains)
    unblocked_domains = chain.from_iterable(unblocked_domains)
    unblock_rules = chain.from_iterable(unblock_rules)
    regex_rules = chain.from_iterable(regex_rules)
    cname_list = chain.from_iterable(cname_list)

    return blocked_domains, unblocked_domains, unblock_rules, regex_rules, cname_list
Code example #7
 def _post_processing(self, result_dict: dict, **kwargs):
     img_input = result_dict["image"]
     img_mask = result_dict["mask"]
     img_pred = result_dict["image_pred"]
     img_recon_error = (img_input - img_pred) ** 2
     img_post = np.mean(img_recon_error, axis=1)
     with ProcessPoolExecutor(max_workers=8) as executor:
         futures = \
             [executor.submit(block_mean, pred, block_size=8) for pred in img_post]
         img_post = []
         for f in tqdm(futures, desc='post-processor', dynamic_ncols=True):
             img_post.append(f.result())
     img_post = np.array(img_post)
     img_post = (img_post - img_post.min()) / (img_post.max() - img_post.min())
     img_post = img_post.squeeze()
     binary_target = img_mask.squeeze().astype(bool)  # np.bool was removed in NumPy 1.24; use the builtin bool
     helper = ThresholdHelper(img_post, binary_target, metric='dice')
     best_thr, max_value = helper.get_best_threshold()
     binary_pred = (img_post > best_thr)
     print('best threshold: %g, max value: %g' % (best_thr, max_value))
     result_dict["score_pred"] = img_post.squeeze()
     result_dict["mask_pred"] = binary_pred.squeeze()
     result_dict["mask"] = binary_target.squeeze()
     result_dict["image"] = \
         restore_image_normalization(imgs=result_dict["image"], normalization=self.cfg.DATASET.normalization)
     result_dict["image_pred"] = \
         restore_image_normalization(imgs=result_dict["image_pred"], normalization=self.cfg.DATASET.normalization)
     return result_dict
Code example #8
def main():
    rating_futures = []

    # Create the pool once; opening and tearing down a new executor inside the
    # loop would wait for each task to finish before submitting the next one.
    with ProcessPoolExecutor(max_workers=20) as executor:
        for facility in get_facilities_info():
            future = executor.submit(rate_facility, facility)
            rating_futures.append(future)

    successfully_processed = []
    failed_tasks = []

    for future in as_completed(rating_futures):
        err = future.exception()
        if not err:
            successfully_processed.append(future.result())
        else:
            failed_tasks.append(err.args[0])  # facility

    print('\nratings for facilities:')

    for facility, rating in sorted(successfully_processed, reverse=True,
                                   key=lambda x: x[1]):
        print(f'\t{facility}: {rating:.1f}%')

    if failed_tasks:
        print(f'\nrating process failed for {len(failed_tasks)} facilities:')
        for facility in failed_tasks:
            print(f'\t{facility}')
Code example #9
File: process-1.py Project: xzhdream/python-lib
def main():
    with ProcessPoolExecutor(max_workers=2) as ex:
        future = ex.submit(pow, 2, 4)
        print('-----')

        print(future.result())
        print('=====')
Code example #10
def getPageRankOfDataset(ds):
    if isinstance(ds, str):
        ds = Helper.getDataFromDir(ds, mode='list')
    with ProcessPoolExecutor(max_workers=cpu_count(
            logical=Helper.logical())) as executor:
        fts = {}
        kfs = {}
        dsStr = listOfTaggedToString(ds)
        collection = list(map(lambda doc: doc.lower(), dsStr))
        cands = getAllCandidates(ds, deliver_as='sentences')
        info = getInfo(ds)
        for i, file in enumerate(ds):
            fts.update({
                executor.submit(getKeyphrases,
                                dsStr[i].lower(),
                                info,
                                candidates=cands[i],
                                doc_i=i,
                                collection=collection):
                file
            })
        for future in as_completed(fts):
            file = fts[future]
            kfs.update({file: future.result()})
    return kfs
Code example #11
async def process_response(response_data):
    loop = asyncio.get_running_loop()
    with ProcessPoolExecutor() as pool:
        result = await loop.run_in_executor(
            pool, functools.partial(parse_xml_data, response_data))

    return result
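
This snippet offloads CPU-bound XML parsing to a process pool without blocking the event loop. A self-contained sketch of the same run_in_executor pattern, with cpu_bound_sum as a hypothetical stand-in for parse_xml_data:

import asyncio
from concurrent.futures import ProcessPoolExecutor

def cpu_bound_sum(n):
    # Hypothetical CPU-heavy task standing in for parse_xml_data.
    return sum(range(n))

async def main():
    loop = asyncio.get_running_loop()
    with ProcessPoolExecutor() as pool:
        # run_in_executor returns an awaitable, so the event loop stays
        # responsive while the worker process does the heavy lifting.
        result = await loop.run_in_executor(pool, cpu_bound_sum, 10_000_000)
    print(result)

if __name__ == "__main__":
    asyncio.run(main())
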
Code example #12
 def process_and_store_recordings(
         self,
         recordings: Sequence[Recording],
         output_manifest: Optional[Pathlike] = None,
         num_jobs: int = 1
 ) -> FeatureSet:
     if num_jobs == 1:
         # Avoid spawning subprocesses for single threaded processing
         feature_set = FeatureSet.from_features(
             tqdm(
                 chain.from_iterable(
                     map(self._process_and_store_recording, recordings)
                 ),
                 total=len(recordings),
                 desc='Extracting and storing features'
             )
         )
     else:
         with ProcessPoolExecutor(num_jobs, mp_context=multiprocessing.get_context('spawn')) as ex:
             feature_set = FeatureSet.from_features(
                 tqdm(
                     chain.from_iterable(
                         ex.map(self._process_and_store_recording, recordings)
                     ),
                     total=len(recordings),
                     desc='Extracting and storing features in parallel'
                 )
             )
     if output_manifest is not None:
         feature_set.to_file(output_manifest)
     return feature_set
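
The parallel branch pins the worker start method with mp_context=multiprocessing.get_context('spawn'). A minimal sketch of that ProcessPoolExecutor option on its own; 'spawn' starts each worker in a fresh interpreter, which avoids the pitfalls of forking a parent that already holds threads or locks, at the cost of slower startup:

import multiprocessing
from concurrent.futures import ProcessPoolExecutor

def square(x):
    return x * x

if __name__ == "__main__":
    # Pin the start method explicitly instead of relying on the platform default.
    ctx = multiprocessing.get_context("spawn")
    with ProcessPoolExecutor(max_workers=2, mp_context=ctx) as ex:
        print(list(ex.map(square, range(5))))
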
Code example #13
 async def test_send_signal_cancelled_future(self) -> None:
     """Process task should be able to terminate by method send_signal()."""
     with ProcessPoolExecutor() as executor:
         future = self.create_future(executor)
         assert future.cancel()
         await self.execute_test(future, SIGTERM, True, CancelledError)
     assert future.done()
Code example #14
File: processor.py Project: bausk/trader-monorepo
async def start_subprocess_and_listen(async_worker,
                                      backtest_session,
                                      strategy,
                                      db_q=None):
    from concurrent.futures.process import ProcessPoolExecutor
    from utils.processing.async_queue import AsyncProcessQueue

    q = AsyncProcessQueue()

    loop = get_event_loop_with_exceptions()
    print('running in executor')
    loop.run_in_executor(
        ProcessPoolExecutor(max_workers=1),
        _worker_sync,
        async_worker,
        q,
        backtest_session,
        strategy,
        db_q,
    )

    while True:
        result = await q.coro_get()
        if result is None:
            return
        yield result
Code example #15
def parallel_processing(func, args, workers):
    '''
        Basic helper that runs the same function over a sequence of arguments
        in parallel, one task per argument, using a process pool.
    '''
    with ProcessPoolExecutor(workers) as ex:
        res = ex.map(func, args)
    return list(res)
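
One caveat worth noting: whatever func is passed here must be picklable, which in practice means a function defined at module level; lambdas and locally defined closures fail when the pool tries to ship them to the workers. A hedged usage sketch with a hypothetical cube function:

def cube(x):
    return x ** 3

if __name__ == "__main__":
    # cube is defined at module level, so it can be pickled and sent to the
    # worker processes; a lambda here would raise a pickling error.
    print(parallel_processing(cube, range(10), workers=4))
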
Code example #16
File: server.py Project: zhangjunhandsome/grpclib
async def main(*, host: str = '127.0.0.1', port: int = 50051) -> None:
    with ProcessPoolExecutor(max_workers=4) as executor:
        server = Server([Primes(executor)])
        with graceful_exit([server]):
            await server.start(host, port)
            print(f'Serving on {host}:{port}')
            await server.wait_closed()
Code example #17
File: main.py Project: CastielWong/MVP
def main(mode: str) -> None:
    """Execute the main workflow.

    Args:
        mode: either "process" or "thread"
    """
    pool_executor = {
        "process": ProcessPoolExecutor(),
        "thread": ThreadPoolExecutor(),
    }
    print(f"Running in mode: {Fore.GREEN}{mode}")

    urls = [
        "https://talkpython.fm",
        "https://pythonbytes.fm",
        "https://google.com",
        "https://realpython.com",
        "https://training.talkpython.fm",
    ]

    work = []

    with pool_executor[mode] as executor:
        for url in urls:
            future: Future = executor.submit(get_title, url)
            work.append(future)

        print(f"{Fore.RESET}Waiting for downloads...")

    for future in work:
        print(f"{Fore.CYAN}{future.result()}")

    print(f"{Fore.RESET}Done")
Code example #18
File: data.py Project: yonlif/3D-Modeling
    def scan_object(self: 'Data') -> np.ndarray:
        """
        :return: A point representing an object captured from all the cameras that are on
        """
        depth_cams = [
            cam for cam in self.cameras
            if cam.on and cam.type is CameraType.Depth
        ]
        lidar_cams = [
            cam for cam in self.cameras
            if cam.on and cam.type is CameraType.LiDAR
        ]

        # Using processes means effectively side-stepping the Global Interpreter Lock
        with ProcessPoolExecutor(
                max_workers=min(self.max_workers,
                                len(depth_cams) + len(lidar_cams))) as pool:
            futures = [
                pool.submit(dcam.generate_adapted_point_cloud,
                            self.number_of_frames, self.number_of_dummy_frames)
                for dcam in depth_cams
            ]

            # LiDAR cameras cannot capture simultaneously; for now run them serially
            # TODO: work around Python's concurrency limitations and make this more concurrent
            pcs = [
                lcam.generate_adapted_point_cloud(self.number_of_frames,
                                                  self.number_of_dummy_frames)
                for lcam in lidar_cams
            ]
            for fut in futures:
                pcs.append(fut.result())

        return np.concatenate(pcs)
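
The comment about side-stepping the Global Interpreter Lock is the reason processes are used for the depth cameras. A minimal, self-contained sketch of that point, assuming nothing from the project above: a CPU-bound task speeds up under ProcessPoolExecutor but not under ThreadPoolExecutor, since only one thread can run Python bytecode at a time.

import time
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor

def busy(n):
    # Pure-Python CPU-bound work that never releases the GIL.
    total = 0
    for i in range(n):
        total += i * i
    return total

if __name__ == "__main__":
    work = [2_000_000] * 8
    for pool_cls in (ThreadPoolExecutor, ProcessPoolExecutor):
        start = time.perf_counter()
        with pool_cls(max_workers=4) as pool:
            list(pool.map(busy, work))
        print(pool_cls.__name__, round(time.perf_counter() - start, 2), "s")
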
Code example #19
def process_execution():
    start = time.time()
    with ProcessPoolExecutor(max_workers=5) as executor:
        results = executor.map(is_prime, NUMBERS)
    for result in results:
        print(result)
    print('start: {}  Time taken: {}'.format(start, time.time() - start))
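
is_prime and NUMBERS are defined elsewhere in that script; a self-contained sketch of the same pattern with hypothetical values follows. The `if __name__ == '__main__'` guard matters here: under the spawn start method (the default on Windows and macOS) each worker re-imports the module, and unguarded pool creation would recurse.

import time
from concurrent.futures import ProcessPoolExecutor

NUMBERS = [112272535095293, 112582705942171, 115280095190773, 1099726899285419]

def is_prime(n):
    if n < 2:
        return False
    return all(n % d for d in range(2, int(n ** 0.5) + 1))

if __name__ == "__main__":
    start = time.time()
    with ProcessPoolExecutor(max_workers=5) as executor:
        for number, prime in zip(NUMBERS, executor.map(is_prime, NUMBERS)):
            print(f'{number} is prime: {prime}')
    print('Time taken: {:.2f}s'.format(time.time() - start))
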
Code example #20
async def main(db_path, max_query_time):
    args = dict(initializer=initializer,
                initargs=(log, db_path, MainNetLedger, 0.25))
    workers = max(os.cpu_count(), 4)
    log.info(f"using {workers} reader processes")
    query_executor = ProcessPoolExecutor(workers, **args)
    tasks = [search(query_executor, constraints) for constraints in get_args()]
    try:
        results = await asyncio.gather(*tasks)
        times = {msg: ts for ts, msg in results}
        log.info("\n".join(
            sorted(filter(lambda msg: times[msg] > max_query_time,
                          times.keys()),
                   key=lambda msg: times[msg])))
    finally:
        query_executor.shutdown()
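
The dict passed as **args carries the initializer/initargs pair through to ProcessPoolExecutor, which runs the initializer once in every worker process before it accepts tasks. A minimal sketch of that feature with a hypothetical per-worker setup function:

from concurrent.futures import ProcessPoolExecutor

_db = None

def init_worker(db_path):
    # Executed once in each worker process, e.g. to open a connection.
    global _db
    _db = f"connection to {db_path}"

def run_query(i):
    return f"{_db} handled query {i}"

if __name__ == "__main__":
    with ProcessPoolExecutor(max_workers=2,
                             initializer=init_worker,
                             initargs=("/tmp/example.db",)) as ex:
        for line in ex.map(run_query, range(4)):
            print(line)
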
Code example #21
async def startup():
    # dpc.init()
    # Overwrite the loop var initialized when dpc is imported; uvicorn runs a
    # different loop object.
    dpc.ascloop = asyncio.get_event_loop()
    app.state.executor = ProcessPoolExecutor(max_workers=1)
    app.state.emulator = SysPoolExecutor(max_workers=1)
    cfg.logger.info('Tsaisendo service environment initiated')
Code example #22
def find_optimal_weights_given_grid_points(
        grid_points: list,
        optimisation_parameters: optimisationParameters,
        use_process_pool: bool = False,
        num_processes: int = 8):

    grid_possibles = itertools.product(*grid_points)

    if use_process_pool:
        with ProcessPoolExecutor(max_workers=num_processes) as pool:
            results = pool.map(
                neg_return_with_risk_penalty_and_costs,
                grid_possibles,
                itertools.repeat(optimisation_parameters),
            )
    else:
        results = map(neg_return_with_risk_penalty_and_costs, grid_possibles,
                      itertools.repeat(optimisation_parameters))

    results = list(results)
    list_of_values = [result.value for result in results]
    optimal_value_index = list_of_values.index(min(list_of_values))

    optimal_weights_as_list = results[optimal_value_index].weights

    return optimal_weights_as_list
Code example #23
async def add_sentiment_features(args):
    database = args["database"]
    dataset_db = f"sqlite:///{database}"

    loop = asyncio.get_event_loop()
    with ProcessPoolExecutor(max_workers=args["max_workers"]) as executor:
        await save_sentiment(args["batch_size"], dataset_db, executor, loop)
Code example #24
def download_all_tf_models():
    with ProcessPoolExecutor() as executor:
        list(
            executor.map(
                partial(download_tf_model, model_dir="model"),
                # for a complete list of architecture name supported, see
                # mmdnn/conversion/examples/tensorflow/extractor.py
                [
                    "vgg16",
                    # "vgg19",
                    # "inception_v1",
                    # "inception_v3",
                    # "resnet_v1_50",
                    # # "resnet_v1_152",
                    # "resnet_v2_50",
                    # "resnet_v2_101",
                    # # "resnet_v2_152",
                    # # "resnet_v2_200",
                    # "mobilenet_v1_1.0",
                    # "mobilenet_v2_1.0_224",
                    # "inception_resnet_v2",
                    # "nasnet-a_large",
                    # "facenet",
                    # "rnn_lstm_gru_stacked",
                ],
            ))
Code example #25
def search(states,
           planner,
           nn_model,
           ncpus,
           time_limit_seconds,
           search_budget=-1):
    """
    This function runs (best-first) Levin tree search with a learned policy on a set of problems    
    """
    total_expanded = 0
    total_generated = 0
    total_cost = 0

    slack_time = 600

    solutions = {}

    for name, state in states.items():
        state.reset()
        solutions[name] = (-1, -1, -1, -1)

    start_time = time.time()

    while len(states) > 0:

        #         args = [(state, name, nn_model, search_budget, start_time, time_limit_seconds, slack_time) for name, state in states.items()]
        #         solution_depth, expanded, generated, running_time, puzzle_name = planner.search(args[0])

        with ProcessPoolExecutor(max_workers=ncpus) as executor:
            args = ((state, name, nn_model, search_budget, start_time,
                     time_limit_seconds, slack_time)
                    for name, state in states.items())
            results = executor.map(planner.search, args)
        for result in results:
            solution_depth = result[0]
            expanded = result[1]
            generated = result[2]
            running_time = result[3]
            puzzle_name = result[4]

            if solution_depth > 0:
                solutions[puzzle_name] = (solution_depth, expanded, generated,
                                          running_time)
                del states[puzzle_name]

                total_expanded += expanded
                total_generated += generated
                total_cost += solution_depth

        partial_time = time.time()

        if partial_time - start_time + slack_time > time_limit_seconds or len(
                states) == 0 or search_budget >= 1000000:
            for name, data in solutions.items():
                print("{:s}, {:d}, {:d}, {:d}, {:.2f}".format(
                    name, data[0], data[1], data[2], data[3]))
            return

        search_budget *= 2
Code example #26
 def test_indicators_picklable(self):
     bt = Backtest(SHORT_DATA, SmaCross)
     with ProcessPoolExecutor() as executor:
         stats = executor.submit(Backtest.run, bt).result()
     assert stats._strategy._indicators[
         0]._opts, '._opts and .name were not unpickled'
     bt.plot(results=stats, resample='2d', open_browser=False)
Code example #27
def executor(kind: str, max_workers: int, daemon=True):
    """General purpose utility to get an executor with its as_completed handler

    This allows us to easily use other executors as needed.
    """
    if kind == "thread":
        with ThreadPoolExecutor(max_workers=max_workers) as pool_t:
            yield pool_t
    elif kind == "process":
        with ProcessPoolExecutor(max_workers=max_workers) as pool_p:
            yield pool_p
    elif kind in ["dask", "dask-process", "dask-thread"]:
        import dask
        import distributed
        from distributed.cfexecutor import ClientExecutor

        processes = kind in ("dask", "dask-process")

        with dask.config.set({"distributed.worker.daemon": daemon}):
            with distributed.LocalCluster(
                    n_workers=max_workers,
                    processes=processes,
                    # silence_logs='error'
            ) as cluster:
                with distributed.Client(cluster) as client:
                    yield ClientExecutor(client)
    else:
        raise NotImplementedError("That kind is not implemented")
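
Since this function yields rather than returns, it is presumably consumed as a context manager (an assumption; the decorator is not shown in the snippet). A minimal sketch of that pattern using contextlib.contextmanager, with a hypothetical double task:

from contextlib import contextmanager
from concurrent.futures import ProcessPoolExecutor

@contextmanager
def process_executor(max_workers: int):
    # Yielding from inside the with-block ties the pool's lifetime
    # to the caller's with-statement.
    with ProcessPoolExecutor(max_workers=max_workers) as pool:
        yield pool

def double(x):
    return 2 * x

if __name__ == "__main__":
    with process_executor(2) as pool:
        print(list(pool.map(double, range(4))))
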
Code example #28
def collect_frames_mp(_mvcap):
    """
    Multiprocess version of #collect_frames. It cuts ~10 seconds on i7-8750h delegating processing to a pool of
    processors. Less powerful CPUs could make this run slower than the standard version.
    :param _mvcap:
    :return:
    """
    avgdict: Dict[int, np.ndarray] = OrderedDict({})
    framecount = 0
    read, frame = _mvcap.read()

    def _callback(fut: Future):
        index, value = fut.result()
        avgdict[index] = value

    with ProcessPoolExecutor() as executor:
        while read:
            executor.submit(_indexedtask, framecount, frame).add_done_callback(_callback)
            framecount += 1
            for _ in range(30):      # The argument of range() is the number of skipped frames for every iteration.
                if read:
                    read = _mvcap.grab()
            if read:
                read, frame = _mvcap.read()
        executor.shutdown(True)
    res = []
    for i in range(framecount):
        res.append(avgdict[i])
    return res
Code example #29
    async def test_rebuilds_process_pool_cooperatively(self):
        """
        Make sure that two parallel diffing failures only cause the process
        pool to be rebuilt once, not multiple times.
        """
        # Get a custom executor that will always fail the first time, but get
        # a real one that will succeed afterward.
        executor_resets = 0
        good_executor = ProcessPoolExecutor(1)
        bad_executor = BrokenProcessPoolExecutor()

        def get_executor(self, reset=False):
            nonlocal executor_resets
            if reset:
                executor_resets += 1
            if executor_resets > 0:
                return good_executor
            else:
                return bad_executor

        with patch.object(df.DiffHandler, 'get_diff_executor', get_executor):
            one = self.fetch_async('/html_source_dmp?format=json&'
                                   f'a=file://{fixture_path("empty.txt")}&'
                                   f'b=file://{fixture_path("empty.txt")}')
            two = self.fetch_async('/html_source_dmp?format=json&'
                                   f'a=file://{fixture_path("empty.txt")}&'
                                   f'b=file://{fixture_path("empty.txt")}')
            response1, response2 = await asyncio.gather(one, two)
            assert response1.code == 200
            assert response2.code == 200
            assert executor_resets == 1
            # Ensure *both* diffs hit the bad executor, so we know we didn't
            # have one reset because only one request hit the bad executor.
            assert bad_executor.submit_count == 2
Code example #30
 def get_executor(self, reset=False):
     nonlocal did_get_executor
     if did_get_executor:
         return ProcessPoolExecutor(1)
     else:
         did_get_executor = True
         return BrokenProcessPoolExecutor()
Code example #31
File: test_utils.py Project: leeopop/coexecutor
def do_test3(workers):
    param = {"max_workers": workers}
    loop = asyncio.new_event_loop()

    lock = threading.Lock()
    tresult = []
    presult = []
    cresult = []

    pre_input1 = input_generator(workers, 0)
    pre_input2 = input_generator(workers, max(pre_input1))
    pre_input3 = input_generator(workers, max(pre_input2))

    def result_checker(list, lock, fut):
        with lock:
            try:
                list.append(fut.result())
            except Exception as e:
                list.append(e)

    texec = ThreadPoolExecutor(**param)
    pexec = ProcessPoolExecutor(**param)
    cexec = CoroutinePoolExecutor(**param, loop=loop)

    tstart = round(time.time()+1)
    input1 = [tstart + i for i in pre_input1]
    input2 = [tstart + i for i in pre_input2]
    input3 = [tstart + i for i in pre_input3]

    for x in input1:
        future = texec.submit(wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, tresult, lock))
    result_iter = texec.map(wake_at, input2)
    for x in input3:
        future = texec.submit(wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, tresult, lock))
    for x in result_iter:
        with lock:
            tresult.append(x)

    texec.shutdown(True)

    pstart = round(time.time() + _start_warm_up)
    input1 = [pstart + i for i in pre_input1]
    input2 = [pstart + i for i in pre_input2]
    input3 = [pstart + i for i in pre_input3]

    for x in input1:
        future = pexec.submit(wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, presult, lock))
    result_iter = pexec.map(wake_at, input2)
    for x in input3:
        future = pexec.submit(wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, presult, lock))
    for x in result_iter:
        with lock:
            presult.append(x)

    pexec.shutdown(True)

    cstart = round(time.time() + _start_warm_up)
    input1 = [cstart + i for i in pre_input1]
    input2 = [cstart + i for i in pre_input2]
    input3 = [cstart + i for i in pre_input3]

    async def async_main():
        for x in input1:
            future = cexec.submit(async_wake_at, x)
            future.add_done_callback(
                functools.partial(result_checker, cresult, lock))
        result_iter = cexec.map(async_wake_at, input2)
        for x in input3:
            future = cexec.submit(async_wake_at, x)
            future.add_done_callback(
                functools.partial(result_checker, cresult, lock))
        async for x in result_iter:
            with lock:
                cresult.append(x)
        await cexec.shutdown(False)

    loop.run_until_complete(async_main())

    try:
        loop.run_until_complete(cexec.shutdown(True))
        texec.shutdown(True)
        pexec.shutdown(True)
    finally:
        loop.close()

    tresult = [round((x - tstart) / _precision) for x in tresult]
    presult = [round((x - pstart) / _precision) for x in presult]
    cresult = [round((x - cstart) / _precision) for x in cresult]

    result = True
    for (t, p, c) in zip(tresult, presult, cresult):
        result = result and (t == p)
        if not result:
            print(tresult)
            print(presult)
            print(cresult)
            print(t, p, c)
            assert False
        result = result and (p == c)
        if not result:
            print(tresult)
            print(presult)
            print(cresult)
            print(t, p, c)
            assert False
        result = result and (c == t)
        if not result:
            print(tresult)
            print(presult)
            print(cresult)
            print(t, p, c)
            assert False
    return result
Code example #32
File: test_utils.py Project: leeopop/coexecutor
def do_test1(workers):
    param = {"max_workers": workers}
    start = round(time.time() + _start_warm_up)
    input = input_generator(workers, start)
    loop = asyncio.new_event_loop()

    lock = threading.Lock()
    tresult = []
    presult = []
    cresult = []

    def result_checker(list, lock, fut):
        with lock:
            try:
                list.append(fut.result())
            except Exception as e:
                list.append(e)

    texec = ThreadPoolExecutor(**param)
    pexec = ProcessPoolExecutor(**param)
    cexec = CoroutinePoolExecutor(**param, loop=loop)

    for x in input:
        future = texec.submit(wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, tresult, lock))

        future = pexec.submit(wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, presult, lock))

        future = cexec.submit(async_wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, cresult, lock))

    texec.shutdown(False)
    pexec.shutdown(False)
    loop.run_until_complete(cexec.shutdown(False))

    try:
        loop.run_until_complete(cexec.shutdown(True))
        texec.shutdown(True)
        pexec.shutdown(True)
    finally:
        loop.close()

    tresult = [round((x - start) / _precision) for x in tresult]
    presult = [round((x - start) / _precision) for x in presult]
    cresult = [round((x - start) / _precision) for x in cresult]

    result = True
    for (t, p, c) in zip(tresult, presult, cresult):
        result = result and (t == p)
        if not result:
            print(tresult)
            print(presult)
            print(cresult)
            print(t, p, c)
            assert False
        result = result and (p == c)
        if not result:
            print(tresult)
            print(presult)
            print(cresult)
            print(t, p, c)
            assert False
        result = result and (c == t)
        if not result:
            print(tresult)
            print(presult)
            print(cresult)
            print(t, p, c)
            assert False
    return result