def test_resolve_distributed_mode_slurm3():
    args = argparse.Namespace(
        multiprocessing_distributed=True,
        dist_world_size=None,
        dist_rank=None,
        ngpu=1,
        local_rank=None,
        dist_launcher="slurm",
        dist_backend="nccl",
        dist_init_method="env://",
        dist_master_addr=None,
        dist_master_port=10000,
    )
    env = dict(
        SLURM_PROCID="0",
        SLURM_NTASKS="1",
        SLURM_STEP_NUM_NODES="1",
        SLURM_STEP_NODELIST="localhost",
        SLURM_NODEID="0",
        CUDA_VISIBLE_DEVICES="0,1",
    )

    e = ProcessPoolExecutor(max_workers=2)
    with unittest.mock.patch.dict("os.environ", dict(env, SLURM_LOCALID="0")):
        resolve_distributed_mode(args)
        option = build_dataclass(DistributedOption, args)
        fn = e.submit(option.init)

    with unittest.mock.patch.dict("os.environ", dict(env, SLURM_LOCALID="0")):
        option2 = build_dataclass(DistributedOption, args)
        fn2 = e.submit(option2.init)

    fn.result()
    fn2.result()
Example n. 2
class PendantFeaturesService:
    @inject
    def __init__(self,
                 default_params_factory: PendantFeaturesParamsFactory) -> None:
        self._executor = ProcessPoolExecutor(max_workers=1)
        self._default_params_factory = default_params_factory

    def extract(
        self,
        image: np.ndarray,
        params: Optional[PendantFeaturesParams] = None,
        *,
        labels: bool = False,
    ) -> asyncio.Future:
        if params is None:
            params = self._default_params_factory.create()

        cfut = self._executor.submit(
            extract_pendant_features,
            image,
            params.drop_region,
            params.needle_region,
            thresh1=params.thresh1,
            thresh2=params.thresh2,
            labels=labels,
        )

        fut = asyncio.wrap_future(cfut, loop=asyncio.get_event_loop())
        return fut

    def destroy(self) -> None:
        self._executor.shutdown()
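The service above bridges a ProcessPoolExecutor future into asyncio with wrap_future. A small, self-contained sketch of that pattern (the heavy() function and its size are illustrative, not part of the original service):

import asyncio
from concurrent.futures import ProcessPoolExecutor


def heavy(n: int) -> int:
    # CPU-bound work that should not block the event loop
    return sum(i * i for i in range(n))


async def main() -> None:
    executor = ProcessPoolExecutor(max_workers=1)
    cfut = executor.submit(heavy, 1_000_000)
    # wrap_future ties the concurrent.futures.Future to the running loop
    result = await asyncio.wrap_future(cfut)
    print(result)
    executor.shutdown()


if __name__ == "__main__":
    asyncio.run(main())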
Example n. 3
class MultiCommand:
    def __init__(
        self,
        commands: Sequence[Union[List[str], str]],
        config: Dict[str, Any],
        abort_reasons: Optional[List[str]] = None,
    ):
        self.processes: List[Future[Dict[str, Any]]] = []
        self.commands = commands
        self.config = config
        self.abort_reasons = abort_reasons
        self.process_pool_executor = ProcessPoolExecutor(
            max_workers=len(commands) if config["parallel"] else 1)

    def run(self) -> List[Dict[str, Any]]:
        for command in self.commands:
            if isinstance(command, str):
                command = [command]
            logger.debug(f'Spawning "{command}"')
            process = self.process_pool_executor.submit(
                retry_process, command, self.config, self.abort_reasons)
            self.processes += [process]

        # result() is blocking. The function will return when all processes are done
        return [process.result() for process in self.processes]
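As the comment notes, result() blocks, so iterating the futures in submission order only returns once every process has finished. A tiny standalone sketch of that behaviour (the worker function is illustrative):

from concurrent.futures import ProcessPoolExecutor


def work(x: int) -> int:
    return x * 2


if __name__ == "__main__":
    with ProcessPoolExecutor(max_workers=2) as pool:
        futures = [pool.submit(work, x) for x in range(4)]
        # Blocks on each future in turn; results come back in submission order.
        print([f.result() for f in futures])  # [0, 2, 4, 6]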
Example n. 4
class ConanFeaturesService:
    @inject
    def __init__(self, default_params_factory: ConanParamsFactory) -> None:
        self._executor = ProcessPoolExecutor(max_workers=1)
        self._default_params_factory = default_params_factory

    def extract(self,
                image: np.ndarray,
                params: Optional[ConanFeaturesParams] = None,
                *,
                labels: bool = False) -> asyncio.Future:
        params = params or self._default_params_factory.create()
        params_dict = {
            'baseline': params.baseline,
            'inverted': params.inverted,
            'thresh': params.thresh,
            'roi': params.roi,
            'labels': labels,
        }
        cfut = self._executor.submit(extract_contact_angle_features, image,
                                     **params_dict)
        fut = asyncio.wrap_future(cfut, loop=asyncio.get_event_loop())
        return fut

    def destroy(self) -> None:
        self._executor.shutdown()
Example n. 5
def submit_job(database_api, dataset_api, email, dataset, job_name, job_type,
               params):
    global executor

    is_serve = os.environ.get(CIRRO_SERVE) == 'true'
    if executor is None:
        max_workers = int(
            os.environ.get(CIRRO_MAX_WORKERS, '2' if is_serve else '1'))
        if max_workers > 0:
            import multiprocessing
            from concurrent.futures.process import ProcessPoolExecutor
            from concurrent.futures.thread import ThreadPoolExecutor
            executor = ProcessPoolExecutor(
                max_workers=max_workers,
                mp_context=multiprocessing.get_context('spawn')
            ) if is_serve else ThreadPoolExecutor(max_workers=max_workers)
    job_id = database_api.create_job(email=email,
                                     dataset_id=dataset['id'],
                                     job_name=job_name,
                                     job_type=job_type,
                                     params=params)
    if executor is not None:
        future = executor.submit(run_job, email, job_id, job_type, dataset,
                                 params,
                                 database_api if not is_serve else None,
                                 dataset_api if not is_serve else None)
        future.add_done_callback(done_callback)
        job_id_2_future[job_id] = future
    else:
        run_job(email, job_id, job_type, dataset, params,
                database_api if not is_serve else None,
                dataset_api if not is_serve else None)
    return job_id
Example n. 6
def test(path, run_id, runs):
    # load parser and data handler
    parser = CYKParser.load(path)
    data_handler = DataHandler(config.test_set, run_id, runs)

    # parse sentences in parallel
    executor = ProcessPoolExecutor(config.processes)
    futures = [
        executor.submit(parse_tree, parser, sent, run_id)
        for sent in data_handler.generator()
    ]

    # following code is to track progress
    kwargs = {
        'total': len(futures),
        'unit': 'nap',
        'unit_scale': True,
        'leave': True
    }
    for _ in tqdm(as_completed(futures), **kwargs):
        pass
    for future in futures:
        if future.exception() is not None:
            print(future.exception())

    # stitch files if number of runs is 1
    if runs == 1:
        stitch_files()
    print("Done parsing")
Example n. 7
    def _schedule_user_task(self, executor: ProcessPoolExecutor,
                            future_set: dict, task: TaskInfo) -> Future:
        task.start_task()
        future = executor.submit(task.method, *task.args, **task.kwargs)
        future.add_done_callback(
            lambda x: self._user_task_done_callback(x, future_set, task))
        future_set[future] = task
        return future
def main():
    pool = ProcessPoolExecutor(max_workers=int(multiprocessing.cpu_count() *
                                               0.5))
    # pool = ProcessPoolExecutor(max_workers=1)
    pages_2 = [{
        'id': "p1",
        'type': 'STACK',
        'constraint': None
    }, {
        'id': "p2",
        'type': 'STACK',
        'constraint': None
    }]
    pages_3 = [{
        'id': "p1",
        'type': 'STACK',
        'constraint': None
    }, {
        'id': "p2",
        'type': 'STACK',
        'constraint': None
    }, {
        'id': "p3",
        'type': 'STACK',
        'constraint': None
    }]
    base_constraints = [
        {
            "type": "EDGES_TO_SUB_ARC_ON_PAGES",
            "arguments": ["0", "1"],  # the outer terminals
            "modifier": ["p1", "p2"]  # the pages
        },
        # inner terminals are after one outer terminal and before the other
        {
            "type": "NODES_PREDECESSOR",
            "arguments": ["0"],
            "modifier": ["2", "3"],
        },
        {
            "type": "NODES_PREDECESSOR",
            "arguments": ["2", "3"],
            "modifier": ["1"],
        }
    ]
    set_printing(False)
    onlyfiles = [
        os.path.join('500-random-planar-3-trees', f)
        for f in listdir(path='500-random-planar-3-trees')
        if isfile(os.path.join('500-random-planar-3-trees', f))
    ]
    for file in onlyfiles:
        with open(file, mode="r") as f:
            graph_str = f.read()
            future = pool.submit(do_experiment, base_constraints, pages_2,
                                 pages_3, graph_str)
            future.add_done_callback(callback)

    pool.shutdown(wait=True)
def main():
    pool = ProcessPoolExecutor(max_workers=int(multiprocessing.cpu_count() -
                                               2))
    # pool = ProcessPoolExecutor(max_workers=1)
    pages = [{
        'id': "p1",
        'type': 'STACK',
        'constraint': None
    }, {
        'id': "p2",
        'type': 'STACK',
        'constraint': None
    }, {
        'id': "p3",
        'type': 'STACK',
        'constraint': None
    }]
    base_constraints = [
        {
            "type": "EDGES_TO_SUB_ARC_ON_PAGES",
            "arguments": ["0", "1"],  # the outer terminals
            "modifier": ["p1", "p2"]  # the pages
        },
        # inner terminals are after one outer terminal and before the other
        {
            "type": "NODES_PREDECESSOR",
            "arguments": ["0"],
            "modifier": ["2", "3"],
        },
        {
            "type": "NODES_PREDECESSOR",
            "arguments": ["2", "3"],
            "modifier": ["1"],
        }
    ]
    set_printing(False)
    experiments = [
        # (graph_generation.random_planar_gh, 300, [15, 10]),
        (graph_generation.random_planar, 1000, [110]),
        # (graph_generation.spine_graph, 1, list(range(10, 350, 1)))
    ]
    with open("results_random_planar_110.json", mode="a") as f:

        def callback(my_future: Future):
            if not my_future.done() or my_future.cancelled():
                return
            result: ExResult = my_future.result()

            print(simplejson.dumps(result), file=f)

        for graph_gen, number_of_runs, num_node_list in experiments:
            for num_nodes in num_node_list:
                for i in range(number_of_runs):
                    future = pool.submit(do_experiment, base_constraints,
                                         graph_gen, i, num_nodes, pages)
                    future.add_done_callback(callback)

        pool.shutdown(wait=True)
Example n. 10
def show(qtd):
    jsons = list()

    # Reuse a single executor instead of leaking a new one on every iteration.
    executor = ProcessPoolExecutor(max_workers=2)
    for i in range(int(qtd)):
        task1 = executor.submit(send)
        jsons.append(task1.result())
    executor.shutdown()

    return {"arquivos": jsons}
Example n. 11
class ProcessesPool(PoolInterface):
    def __init__(self, amount_of_entities):
        super().__init__(amount_of_entities)
        self.pool = ProcessPoolExecutor(self.max_amount)

    def shutdown(self, timeout):
        # ProcessPoolExecutor has no close(); shutdown() releases the workers.
        self.pool.shutdown(wait=True)

    def execute(self, funct, socket):
        return self.pool.submit(funct, socket[0], socket[1])
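concurrent.futures executors expose shutdown() rather than close(); since Python 3.9, cancel_futures=True additionally drops queued tasks. A minimal sketch of those semantics (slow() and the sizes are illustrative):

import time
from concurrent.futures import ProcessPoolExecutor


def slow(x: int) -> int:
    time.sleep(1)
    return x * x


if __name__ == "__main__":
    pool = ProcessPoolExecutor(max_workers=2)
    futures = [pool.submit(slow, i) for i in range(8)]
    # wait=True blocks until running tasks finish; cancel_futures=True (3.9+)
    # cancels tasks that have not started yet.
    pool.shutdown(wait=True, cancel_futures=True)
    print([f.result() for f in futures if not f.cancelled()])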
Example n. 12
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-v', '--verbose', action='store_true', default=False, help='print stats')
    parser.add_argument('-o', '--output', type=str, help='path to CSV output')
    parser.add_argument('--publishers-per-client', type=int, default=7)
    parser.add_argument('--publish-interval', type=int, default=100, help='publish interval in ms')
    parser.add_argument('--clients-per-group', type=int, default=10)
    parser.add_argument('--enable-ack', action='store_true')
    args = parser.parse_args(sys.argv[1:])
    if args.output:
        chdir(args.output)
    common_kwargs = {
        'publishers_per_client': args.publishers_per_client,
        'publish_interval': args.publish_interval,
        'clients_per_group': args.clients_per_group,
        'verbose': args.verbose,
        'enable_ack': args.enable_ack,
    }

    scenario_configs = [
        {
            **common_kwargs,
            'name': 'baseline',
        },
        {
            **common_kwargs,
            'name': 'vivaldi',
            'use_vivaldi': True,
        },
    ]

    def run_scenario(**kwargs):
        EmmaScenario(**kwargs).run()

    if args.verbose:
        print('running emma scenarios sequentially')
        for config in scenario_configs:
            run_scenario(**config)
    else:
        print('running emma scenarios in separate processes')
        executor = ProcessPoolExecutor(len(scenario_configs))
        for config in scenario_configs:
            executor.submit(run_scenario, **config)
Example n. 13
def run(executor: ProcessPoolExecutor,
        num_of_experiments: int,
        objective_function_pointer: Any, spawn_boundaries: List[List[float]],
        objective_function_goal_point: array,
        maximum_iterations: int,
        swarm_size: int = 40, isClan: bool = False, number_of_clans: int = 4, c1: float = 2.0, c2: float = 2.0,
        adaptivePSO: bool = False,
        eis: Tuple[Tuple[GlobalLocalCoefficientTypes, float or None], Tuple[ControlFactorTypes, float or None]] =
        ((GlobalLocalCoefficientTypes.NONE, None), (ControlFactorTypes.NONE, None)),
        search_and_velocity_boundaries: List[List[float]] = None, wt: WallTypes = WallTypes.NONE
        ):
    experiments_precisions_per_experiment = []
    experiments_iterations_per_experiment = []
    experiments_average_iteration_cpu_times_per_experiment = []
    experiments_cpu_times_per_experiment = []

    experiments_array = []

    for experiment in range(num_of_experiments):
        function_task = executor.submit(scripts.experiments.experiment.experiment,
                                        objective_function=objective_function_pointer,
                                        spawn_boundaries=spawn_boundaries,
                                        objective_function_goal_point=objective_function_goal_point,
                                        maximum_iterations=maximum_iterations,
                                        swarm_size=swarm_size, isClan=isClan, number_of_clans=number_of_clans,
                                        c1=c1, c2=c2,
                                        adaptivePSO=adaptivePSO,
                                        eis=eis,
                                        search_and_velocity_boundaries=search_and_velocity_boundaries, wt=wt
                                        )

        experiments_array.append(function_task)

    for task in experiments_array:
        precision, iterations, experiment_average_iteration_cpu_time, experiment_total_cpu_time = task.result()
        experiments_precisions_per_experiment.append(precision)
        experiments_iterations_per_experiment.append(iterations)
        experiments_average_iteration_cpu_times_per_experiment.append(experiment_average_iteration_cpu_time)
        experiments_cpu_times_per_experiment.append(experiment_total_cpu_time)

    mean_experiment_precision = mean(experiments_precisions_per_experiment)
    std_experiment_precision = std(experiments_precisions_per_experiment)
    median_experiment_precision = median(experiments_precisions_per_experiment)
    best_experiment_precision = min(experiments_precisions_per_experiment)
    mean_experiment_iterations = mean(experiments_iterations_per_experiment)
    mean_experiments_average_iteration_cpu_times = mean(experiments_average_iteration_cpu_times_per_experiment)
    mean_experiment_cpu_time = mean(experiments_cpu_times_per_experiment)

    return {"Precision mean": mean_experiment_precision,
            "Precision std": std_experiment_precision,
            "Precision median": median_experiment_precision,
            "Best precision": best_experiment_precision,
            "Mean iterations per experiment": mean_experiment_iterations,
            "Mean iteration CPU time": mean_experiments_average_iteration_cpu_times,
            "Mean experiment CPU time": mean_experiment_cpu_time}
def runTournamentSizeTests():
    size = 1000
    selectors = [
        TournamentSelector(150),
        TournamentSelector(350),
        TournamentSelector(500)
    ]
    crossoverThreshold = 0.2
    mutationProbability = 0.1

    # internal params
    crossoverProbability = 0.2
    mutationStrength = 1
    calculator = createDefaultLossCalculator()

    subDirectory = 'tournamentSize'
    os.makedirs(os.path.join(path, subDirectory), exist_ok=True)
    file = open(os.path.join(path, subDirectory, 'results.txt'), 'w+')
    board = loadFromFile('textTests/zad1.txt')
    file.write(f'Test parameters:\n'
               f'Population size: {size}\n'
               f'Crossover probability: {(1 - crossoverThreshold)}\n'
               f'Mutation probability: {(1 - mutationProbability)}\n\n'
               f'Test results:\nParameter:\tBest:\tWorst:\tAvg:\tStdDev:\n')

    for selector in selectors:
        bestSolutionLosses = []
        worstSolutionLosses = []
        file.write(f'{selector}\t')

        pool = ProcessPoolExecutor(max_workers=10)
        futures = []
        for i in range(10):
            futures.append(
                pool.submit(
                    immediatelyCall,
                    GeneticAlgorithm(board, size, calculator, selector,
                                     crossoverThreshold, crossoverProbability,
                                     mutationProbability, mutationStrength)))
        result = wait(futures, return_when=ALL_COMPLETED)
        for index, value in enumerate(result[0]):
            (best, worst, _, image) = value.result()
            bestSolutionLosses.append(best)
            worstSolutionLosses.append(worst)
            image.save(
                os.path.join(path, subDirectory,
                             f'{selector}-test-{index}.png'))
            image.close()
        best = min(bestSolutionLosses)
        worst = max(worstSolutionLosses)
        avg = statistics.mean(bestSolutionLosses)
        stdDev = statistics.stdev(bestSolutionLosses)
        file.write(f'{best}\t{worst}\t{avg}\t{stdDev}\n')
    file.close()
Example n. 15
def lyricSpider(user_id):
    print("======= Start crawling lyric info ===========")
    startTime = datetime.datetime.now()
    print(startTime.strftime('%Y-%m-%d %H:%M:%S'))
    # total number of songs for this user
    try:
        musics_num = sql.get_music_num(user_id)
    except:
        print("User has not granted access, exiting")
        sys.exit(0)
    print("musics:", musics_num.get('num'), "start")
    batch = math.ceil(musics_num.get('num') / 34.0)
    pool = ProcessPoolExecutor(3)
    for index in range(0, batch):
        pool.submit(saveLyricBatch, user_id, index)
    pool.shutdown(wait=True)
    print("======= Finished crawling lyric info ===========")
    endTime = datetime.datetime.now()
    print(endTime.strftime('%Y-%m-%d %H:%M:%S'))
    print("Elapsed:", (endTime - startTime).seconds, "seconds")
Example n. 16
def create_process(func: Callable, *args: Any, **kwargs: Any) -> Future:
    '''
    Calls a function in its own process
    :param func: The function to be called
    :param args: The function arguments
    :param kwargs: The function keyword arguments
    :return: The created Future object, from which we can call 'result()' to get the function return value.
    '''
    tp = ProcessPoolExecutor(max_workers=1)
    future = tp.submit(func, *args, **kwargs)
    create_thread(tp.shutdown, wait=True)  # Necessary since wait=False is broken
    return future
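A self-contained sketch of the same trick, assuming nothing beyond the standard library: submit to a one-worker pool and release it from a background thread so the caller gets the future back immediately:

import threading
from concurrent.futures import Future, ProcessPoolExecutor


def run_in_own_process(func, *args, **kwargs) -> Future:
    executor = ProcessPoolExecutor(max_workers=1)
    future = executor.submit(func, *args, **kwargs)
    # shutdown(wait=True) would block until the task is done, so push it to a
    # daemon thread and hand the future straight back.
    threading.Thread(target=executor.shutdown, kwargs={"wait": True},
                     daemon=True).start()
    return future


if __name__ == "__main__":
    print(run_in_own_process(pow, 2, 10).result())  # 1024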
Example n. 17
    class DelayedFileList:
        def __init__(self, filename):
            self.__loader = ProcessPoolExecutor(4)

            self.__filehandle = open(
                filename, "r", encoding="iso-8859-1"
            )  # :type self.__filehandle: typing.TextIO
            self.__lines = 0
            self.__thousand_offsets = [0]
            line = self.__filehandle.readline()
            while line:
                if line.strip():
                    self.__lines += 1
                    if self.__lines % 1000 == 0:
                        self.__thousand_offsets.append(
                            self.__filehandle.tell())
                line = self.__filehandle.readline()
            self.__filehandle.seek(0)

        def __len__(self):
            return self.__lines

        def __getitem__(self, i):
            if i < 0 or i >= len(self):
                raise IndexError("Out of bounds")

            thousand_offset = self.__thousand_offsets[i // 1000]
            remainder = i % 1000

            self.__filehandle.seek(thousand_offset)
            while remainder > 0:
                self.__filehandle.readline()
                remainder -= 1

            name, url = [
                x.strip()
                for x in self.__filehandle.readline().strip().split("\t")
            ]
            class_name = name.split("_")[0]

            pre_image = Image(name, class_name, url, None, None)

            submitted_task = self.__loader.submit(load_image, pre_image.url)

            def deferred_load():
                return submitted_task.result()

            return Image(name, class_name, url,
                         load_annotation_for_image(pre_image), deferred_load)

        def close(self):
            self.__loader.shutdown()
            self.__filehandle.close()
Example n. 18
def thead_collect():
    results = MONGO.select('albums', {'collected': {'$ne': 1}}, limit=200)
    # thead_pool = ThreadPoolExecutor(5)
    thead_pool = ProcessPoolExecutor(5)

    while results:
        for result in results:
            baby = CollectBaby(result)
            # magic()
            thead_pool.submit(baby.collect)
            # baby.collect()

        results = MONGO.select('albums', {
            '_id': {
                '$gt': result.get('_id')
            },
            'collected': {
                '$ne': 1
            }
        },
                               limit=200)

    thead_pool.shutdown(wait=True)
Example n. 19
async def test_endpoint():
    print(f"main process: {os.getpid()}")

    START_TIME = time.time()
    STOP_TIME = START_TIME + 2

    pool = ProcessPoolExecutor(max_workers=3)
    futures = [
        pool.submit(simple_routine, [1]),
        pool.submit(simple_routine, [1]),
        pool.submit(simple_routine, [10]),
    ]
    results = []
    for fut in futures:
        remains = max(STOP_TIME - time.time(), 0)
        try:
            # Future objects expose result(), not get(); result(timeout=...)
            # raises TimeoutError if the task does not finish in time.
            results.append(fut.result(timeout=remains))
        except Exception:
            results.append("not done")

    # release the pool without waiting (running workers are not killed)
    pool.shutdown(wait=False)
    print("exiting at: ", int(time.time() - START_TIME))
    return "True"
Example n. 20
def get_linkpred_scores(lp_data,
                        weighted,
                        predictor_indices=None,
                        include_train=False,
                        parallel_version=False):
    predictor_indices = predictor_indices or range(len(all_predictors))
    predictors = [all_predictors[i] for i in predictor_indices]
    predictor_names = [all_predictor_names[i] for i in predictor_indices]
    A_train = lp_data['A_train']
    A_test = lp_data['A_test']
    A_test_pos = lp_data['A_test_pos']
    A_test_neg = lp_data['A_test_neg']
    G_train = nx.from_scipy_sparse_matrix(A_train)
    if include_train:
        pairs = list(itertools.combinations(range(A_train.shape[0]), 2))
    else:
        test_pairs = list(zip(*triu(A_test_pos + A_test_neg).nonzero()))
        pairs = test_pairs if not include_train else test_pairs + list(
            zip(*triu(A_train).nonzero()))
    scores = {}
    if not parallel_version:
        for i, predictor in tqdm(enumerate(predictors), 'Predictor: '):
            predictor_name = predictor_names[i]
            abbr, lp_scores = perform_one_lp(predictor_name, predictor,
                                             G_train, pairs, weighted)
            scores[abbr] = lp_scores
    else:
        num_predictors = len(predictors)
        max_workers = min(num_predictors, multiprocessing.cpu_count())
        pool = ProcessPoolExecutor(max_workers=max_workers)
        process_list = []
        for i, predictor in enumerate(predictors):
            predictor_name = predictor_names[i]
            process_list.append(
                pool.submit(perform_one_lp, predictor_name, predictor, G_train,
                            pairs, weighted))
            print('{} of {} processes scheduled ({})'.format(
                len(process_list), num_predictors, predictor_name))
        for p in as_completed(process_list):
            abbr, lp_scores = p.result()
            scores[abbr] = lp_scores
            print('{} of {} processes completed'.format(
                len(scores), len(process_list)))
        pool.shutdown(wait=True)
    scores_df = pd.DataFrame(scores)
    return scores_df
Example n. 21
class ProcessExecutor(Executor):
    slug = 'executor:process'
    name = "Executor: Process"

    awaiter_dict = {
        'as_completed': as_completed,
    }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.executor = ProcessPoolExecutor(max_workers=self.workers)

    def submit(self, fn: Callable, *args, **kwargs):
        return self.executor.submit(fn, *args, **kwargs)

    def shutdown(self, wait=True):
        self.executor.shutdown(wait)
def main(args, device, num_available_devices):
    model_path = Path(args.model)
    root_dir = Path(args.root_dir)

    image_paths = [
        file_name for file_name in root_dir.glob('**/*') if is_image(file_name)
    ]
    analyzed_images = []

    ctx = multiprocessing.get_context('forkserver')
    executor = ProcessPoolExecutor(max_workers=num_available_devices,
                                   mp_context=ctx,
                                   initializer=init_process,
                                   initargs=(model_path, not args.no_split,
                                             device))

    try:
        with executor:
            # Map each future back to its image so failures report the right path.
            current_jobs = {}
            for image_path in image_paths:
                submitted_job = executor.submit(
                    consumer, image_path,
                    str(image_path.relative_to(root_dir)))
                current_jobs[submitted_job] = image_path

            for job in tqdm(as_completed(current_jobs),
                            total=len(current_jobs)):
                try:
                    result = job.result()
                    analyzed_images.append(result)
                except Exception as e:
                    print(f"Could not process {current_jobs[job]}, reason: {e}")
                    traceback.print_exc(file=sys.stdout)
    except KeyboardInterrupt:
        pass

    with (root_dir / 'handwriting_analysis.json').open('w') as f:
        json.dump(analyzed_images, f, indent='\t')

    num_has_handwriting = len(
        [im for im in analyzed_images if im['has_handwriting']])
    print(
        f"Handwriting to no handwriting ratio: {num_has_handwriting / len(analyzed_images)}"
    )
Example n. 23
    def runner(self):
        process_pool = ProcessPoolExecutor(max_workers=4)
        futures = dict()
        for url in self.urls:
            future = process_pool.submit(self.get_web_content, url)  # submit a crawl task
            futures[future] = url  # remember which url this future belongs to

        for future in concurrent.futures.as_completed(futures):  # collect finished tasks
            try:
                url = futures[future]  # recover the url for this future
                crawl_result = future.result()
                urlretrieve(url=crawl_result['img_src'], filename='F:\\CSGO\\csitem\\' + crawl_result["image_name"] + '.png')
                # database insert (disabled)
                #self.cursor.execute("INSERT INTO csgo_item (type,item_type,item_name,price,img,exterior,quality,rarity,sales) VALUES ('%s','%s','%s','%s','%s','%s','%s','%s','%s')"
                #               % (crawl_result["csgotype"],crawl_result["item_type"],crawl_result["name_text"],crawl_result["price"],crawl_result["image_name"],crawl_result["exterior"],crawl_result["quality"],crawl_result["rarity"],crawl_result["sales"])) 
                #self.conn.commit()
            except:
                print("Error while fetching a worker result")
                continue
        print('Finished!')
Example n. 24
    def computeAllPredictions(self, X: pd.DataFrame):
        if self.numProcesses == 1 or len(self.models) == 1:
            return [model.predict(X) for model in self.models]

        predictionFutures = []
        executor = ProcessPoolExecutor(max_workers=self.numProcesses)
        predictors = [
            VectorModelWithSeparateFeatureGeneration(model)
            for model in self.models
        ]
        for predictor in predictors:
            predictFinaliser = predictor.predictStart(X)
            frameinfo = getframeinfo(currentframe())
            PickleFailureDebugger.logFailureIfEnabled(
                predictFinaliser,
                contextInfo=
                f"Submitting {predictFinaliser} in {frameinfo.filename}:{frameinfo.lineno}"
            )
            predictionFutures.append(executor.submit(predictFinaliser.execute))
        return [
            predictionFuture.result() for predictionFuture in predictionFutures
        ]
Example n. 25
def main():
    pool = ProcessPoolExecutor(max_workers=int(multiprocessing.cpu_count() *
                                               0.25))
    pages = [{
        'id': "p1",
        'type': 'STACK',
        'constraint': None
    }, {
        'id': "p2",
        'type': 'STACK',
        'constraint': None
    }]
    base_constraints = [
        {
            "type": "EDGES_TO_SUB_ARC_ON_PAGES",
            "arguments": ["0", "1"],  # the outer terminals
            "modifier": ["p1", "p2"]  # the pages
        },
        # inner terminals are after one outer terminal and before the other
        {
            "type": "NODES_PREDECESSOR",
            "arguments": ["0"],
            "modifier": ["2", "3"],
        },
        {
            "type": "NODES_PREDECESSOR",
            "arguments": ["2", "3"],
            "modifier": ["1"],
        }
    ]
    set_printing(False)

    with open("results_random_planar_110.json", mode="r") as f:
        for line in f:
            future = pool.submit(do_experiment, base_constraints, pages, line)
            future.add_done_callback(callback)

        pool.shutdown(wait=True)
Example n. 26
    def _fit(self, X: pd.DataFrame, Y: pd.DataFrame):
        if self.numProcesses == 1 or len(self.models) == 1:
            for model in self.models:
                model.fit(X, Y)
            return

        fittedModelFutures = []
        executor = ProcessPoolExecutor(max_workers=self.numProcesses)
        fitters = [
            VectorModelWithSeparateFeatureGeneration(model)
            for model in self.models
        ]
        for fitter in fitters:
            intermediateStep = fitter.fitStart(X, Y)
            frameinfo = getframeinfo(currentframe())
            PickleFailureDebugger.logFailureIfEnabled(
                intermediateStep,
                contextInfo=
                f"Submitting {fitter} in {frameinfo.filename}:{frameinfo.lineno}"
            )
            fittedModelFutures.append(executor.submit(
                intermediateStep.execute))
        for i, fittedModelFuture in enumerate(fittedModelFutures):
            self.models[i] = fitters[i].fitEnd(fittedModelFuture.result())
Example n. 27
def main():
    start = time.time()

    args = parse_args()
    wwise_dir = Path(args.wwise_dir)
    out_dir = Path(args.out_dir)
    out_dir.mkdir(exist_ok=True)
    id_to_filename_path = out_dir / ID_TO_FILENAME

    manager = mp.Manager()
    queue = manager.Queue()
    id_queue = manager.Queue()

    setup_logging(queue)

    target_handlers = logbook.NestedSetup([
        logbook.NullHandler(),
        logbook.StreamHandler(sys.stdout, level=logbook.INFO, bubble=True),
        logbook.FileHandler("extract.log",
                            mode="w",
                            level=logbook.INFO,
                            bubble=True),
    ])

    sub = MultiProcessingSubscriber(queue)
    controller = sub.dispatch_in_background(target_handlers)

    quickbms_log_lock = manager.Lock()
    quickbms_log = out_dir / "quickbms.log"

    try:
        id_to_filename_path.unlink()
        logbook.info("removed old {id_file}",
                     id_file=id_to_filename_path.absolute())
    except FileNotFoundError:
        pass

    logbook.info("QuickBMS log: '{qlog}'", qlog=quickbms_log.absolute())
    try:
        quickbms_log.unlink()
        logbook.info("removed old {f}", f=quickbms_log.absolute())
    except FileNotFoundError:
        pass

    id_to_filename_path.touch()
    logbook.info("writing old ID -> new filename info in '{id_file}'",
                 id_file=id_to_filename_path.absolute())

    id_to_filename_p = mp.Process(target=id_to_filename_worker,
                                  args=(id_queue, id_to_filename_path, queue))
    id_to_filename_p.start()

    logbook.info("processing audio files in '{wd}'", wd=wwise_dir.absolute())

    fut2func = {}
    # Parse .bnk files and metadata.
    with ProcessPoolExecutor(max_workers=MAX_WORKERS) as executor:
        fut2func[executor.submit(parse_banks_metadata, wwise_dir,
                                 queue)] = parse_banks_metadata
        fut2func[executor.submit(decode_banks, wwise_dir, out_dir,
                                 quickbms_log, quickbms_log_lock,
                                 queue)] = decode_banks

    memory_bnk_meta_file2metadata = {}
    streamed_bnk_meta_file2metadata = {}
    orig_bnk2decode_info = {}
    for completed_fut in futures.as_completed(fut2func):
        if fut2func[completed_fut] == parse_banks_metadata:
            result = completed_fut.result()
            memory_bnk_meta_file2metadata = result[0]
            streamed_bnk_meta_file2metadata = result[1]
        elif fut2func[completed_fut] == decode_banks:
            orig_bnk2decode_info = completed_fut.result()

    if len(memory_bnk_meta_file2metadata) != len(orig_bnk2decode_info):
        logbook.warning(
            "Number of bank and metadata files "
            "does not match ({first} != {second})",
            first=len(orig_bnk2decode_info),
            second=len(memory_bnk_meta_file2metadata))

        s1 = memory_bnk_meta_file2metadata.keys()
        s2 = set([key.stem for key in orig_bnk2decode_info])

        to_del = []
        diff = s2.difference(s1)
        for d in diff:
            # TODO: expensive!
            for key in orig_bnk2decode_info:
                if key.stem == d:
                    logbook.warn("ignoring {f}", f=str(key))
                    to_del.append(key)

        for td in to_del:
            del orig_bnk2decode_info[td]

    wem_src2wem_dst = {}
    # Move .wem files to out_dir in correct places.
    with ProcessPoolExecutor(max_workers=MAX_WORKERS) as executor:
        for bnk_meta_file, meta in streamed_bnk_meta_file2metadata.items():
            for m in meta:
                src_dir = bnk_meta_file.parent
                src = src_dir / Path(m.generated_audio_file)
                if src.exists():
                    wwise_path = Path(m.wwise_object_path)
                    dst = out_dir / wwise_path.relative_to(
                        wwise_path.anchor).with_suffix(".wem")
                    executor.submit(copy, src, dst, queue, id_queue)
                    wem_src2wem_dst[src] = dst
                else:
                    logbook.warning(
                        "found references to {src} in metadata, but "
                        "the file cannot be found in wwise_dir",
                        src=src)

    decoded_file2metas = {}

    for orig_bnk_file, decode_info in orig_bnk2decode_info.items():
        orig_bnk_file = orig_bnk_file.stem
        meta = memory_bnk_meta_file2metadata[orig_bnk_file]

        if len(decode_info) != len(meta):
            logbook.error(
                "decode_info and meta length mismatch: "
                "{len1} != {len2} for bnk: '{bnk}'",
                len1=len(decode_info),
                len2=len(meta),
                bnk=orig_bnk_file)
            # print(decode_info)
            # print(meta)
            continue
            # raise ValueError(f"decode_info and meta length mismatch "
            #                  f"{len(decode_info)} != {len(meta)}")

        for m, (decoded_stem, decoded_size) in zip(meta, decode_info.items()):
            if m.data_size != decoded_size:
                # raise ValueError(f"{m.data_size} != {decoded_size}")
                logbook.error(
                    "metadata size and decoded data size length mismatch: "
                    "{len1} != {len2}",
                    len1=m.data_size,
                    len2=decoded_size)
                continue
            decoded_file2metas[decoded_stem] = m

    fs = []
    # Move output from decoding .bnk files to correct places in out_dir.
    executor = ProcessPoolExecutor(max_workers=MAX_WORKERS)
    for decoded_file, meta in decoded_file2metas.items():
        src = out_dir / f"{decoded_file}.bin"
        wwise_path = Path(meta.wwise_object_path)
        dst = out_dir / wwise_path.relative_to(
            wwise_path.anchor).with_suffix(".bin")
        fs.append(executor.submit(move, src, dst, queue, id_queue))

    futures.wait(fs, return_when=futures.ALL_COMPLETED)

    fs = []
    # Convert all .wem and .bin files to .ogg.
    executor = ProcessPoolExecutor(max_workers=MAX_WORKERS)
    for bin_file in out_dir.rglob("*.bin"):
        fs.append(executor.submit(ww2ogg, bin_file, queue))
    for wem_file in out_dir.rglob("*.wem"):
        fs.append(executor.submit(ww2ogg, wem_file, queue))

    futures.wait(fs, return_when=futures.ALL_COMPLETED)

    done_wems_stems = set([ws.stem for ws in wem_src2wem_dst.keys()])
    source_wems = [w for w in wwise_dir.rglob("*.wem")]
    source_wems_stems = set([w.stem for w in source_wems])
    wem_diff = source_wems_stems.difference(done_wems_stems)

    if wem_diff:
        logbook.warn("failed to determine filename for "
                     "{num} files",
                     num=len(wem_diff))

    for ws in source_wems:
        if str(ws.stem) in wem_diff:
            logbook.info("couldn't determine filename for: {ws}", ws=ws)
            copy_seq(ws, out_dir, id_to_filename_queue=None)

    # Convert leftovers.
    leftovers_fs = []
    with ProcessPoolExecutor(max_workers=MAX_WORKERS) as executor:
        for wem_file in out_dir.rglob("*.wem"):
            leftovers_fs.append(executor.submit(ww2ogg, wem_file, queue))

    futures.wait(leftovers_fs, return_when=futures.ALL_COMPLETED)

    id_queue.put(SENTINEL)
    id_to_filename_p.join()

    secs = time.time() - start
    logbook.info("finished successfully in {secs:.2f} seconds", secs=secs)

    controller.stop()
Example n. 28
class ConcurrentMapData(Node):
    def __init__(self,
                 ds,
                 num_concurrent=1,
                 map_func=None,
                 executor_type=None,
                 buffer_factor=4,
                 none_on_exception=False,
                 initializer=None,
                 initargs=()):
        if num_concurrent == 1:
            _logger.warn("You need num_concurrent > 1 to obtain acceleration.")
        assert num_concurrent > 0
        if num_concurrent >= os.cpu_count():
            _logger.warn("num_concurrent is too large")

        assert map_func is not None
        assert buffer_factor > 1

        self._ds = ds
        self._num_concurrent = num_concurrent
        self._fn = map_func
        self._exe_type = executor_type
        self._num_submit = int(buffer_factor * self._num_concurrent)
        self._none_on_exception = none_on_exception

        if self._exe_type == "thread":
            self._exe = ThreadPoolExecutor(max_workers=self._num_concurrent,
                                           initializer=initializer,
                                           initargs=initargs)
        elif self._exe_type == "process":
            self._exe = ProcessPoolExecutor(max_workers=self._num_concurrent,
                                            initializer=initializer,
                                            initargs=initargs)
        else:
            raise RuntimeError(
                "Unknown executor type, must be 'thread' or 'process'")
        self._exhausted = False

    def __iter__(self):
        def try_fill_pending(pending: set, iterator):
            while not self._exhausted:
                if len(pending) >= self._num_submit:
                    break
                try:
                    dp = next(iterator)
                    future = self._exe.submit(self._fn, dp)
                    pending.add(future)
                except StopIteration:
                    self._exhausted = True
                    break

        done = set()
        pending = set()
        it = iter(self._ds)
        while True:
            try_fill_pending(pending, it)
            if len(pending) == 0:
                break  # raise StopIteration
            else:
                done, pending = concurrent.futures.wait(
                    pending, return_when=concurrent.futures.FIRST_COMPLETED)
                for d in done:
                    if d is not None:
                        try:
                            yield d.result()
                        except Exception as e:
                            _logger.error(repr(e))
                            if self._none_on_exception:
                                yield None
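The class above keeps a bounded set of in-flight futures and drains them with FIRST_COMPLETED. A condensed, standalone sketch of that back-pressure loop (function and parameter names are illustrative, not part of the original node):

import concurrent.futures


def bounded_concurrent_map(fn, iterable, max_workers=4, buffer=16):
    """Yield fn(x) results (unordered), keeping at most `buffer` tasks in flight."""
    pending = set()
    it = iter(iterable)
    with concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as exe:
        while True:
            # Top up the in-flight set until the buffer is full or input runs out.
            while len(pending) < buffer:
                try:
                    pending.add(exe.submit(fn, next(it)))
                except StopIteration:
                    break
            if not pending:
                return
            done, pending = concurrent.futures.wait(
                pending, return_when=concurrent.futures.FIRST_COMPLETED)
            for fut in done:
                yield fut.result()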
Example n. 29
            # win32api.keybd_event(86, 0, 0, 0)  # v
            # win32api.keybd_event(86, 0, win32con.KEYEVENTF_KEYUP, 0)
            # win32api.keybd_event(17, 0, win32con.KEYEVENTF_KEYUP, 0)
            # # alt s
            # win32api.keybd_event(18, 0, 0, 0)  # Alt
            # win32api.keybd_event(83, 0, 0, 0)  # s
            # win32api.keybd_event(18, 0, win32con.KEYEVENTF_KEYUP, 0)
            # win32api.keybd_event(83, 0, win32con.KEYEVENTF_KEYUP, 0)
        time.sleep(5)


if __name__ == '__main__':
    q = Manager().Queue()  # multiprocess-safe queue
    executor = ProcessPoolExecutor(max_workers=10)  # process pool
    # each of the nine players gets its own subprocess
    task_1 = executor.submit(subprocess_event_producer, BYISHIN_64BIT,
                             BYISHIN_32BIT, q)
    task_2 = executor.submit(subprocess_event_producer, LEEROY_64BIT,
                             LEEROY_32BIT, q)
    task_3 = executor.submit(subprocess_event_producer, NEKO_64BIT, NEKO_32BIT,
                             q)
    task_4 = executor.submit(subprocess_event_producer, ORI_64BIT, ORI_32BIT,
                             q)
    task_5 = executor.submit(subprocess_event_producer, ASIIMOV_64BIT,
                             ASIIMOV_32BIT, q)
    task_6 = executor.submit(subprocess_event_producer, SAKANA_64BIT,
                             SAKANA_32BIT, q)
    task_7 = executor.submit(subprocess_event_producer, DIDIDI_64BIT,
                             DIDIDI_32BIT, q)
    task_8 = executor.submit(subprocess_event_producer, NEVEROWNED_64BIT,
                             NEVEROWNED_32BIT, q)
    task_9 = executor.submit(subprocess_event_producer, RABBIT_64BIT,
Example n. 30
import requests
from concurrent.futures.process import ProcessPoolExecutor

from utils import ts

M = 5000
executor = ProcessPoolExecutor(16)


def job():
    # print(f'job on thread {threading.current_thread().name}')
    r = requests.get('http://localhost:2233/status')
    return len(r.json()['comment'])


# future = executor.submit(job)

futures = []
start = ts()
for i in range(M):
    futures.append(executor.submit(job))
result = sum(f.result() for f in futures)

print(f'check: 12={result/M}')

delta = ts() - start
print(f'{M} requests in {delta:.3f}s ({M / delta:.0f}RPS)')
Example n. 31
def do_test3(workers):
    param = {"max_workers": workers}
    loop = asyncio.new_event_loop()

    lock = threading.Lock()
    tresult = []
    presult = []
    cresult = []

    pre_input1 = input_generator(workers, 0)
    pre_input2 = input_generator(workers, max(pre_input1))
    pre_input3 = input_generator(workers, max(pre_input2))

    def result_checker(list, lock, fut):
        with lock:
            try:
                list.append(fut.result())
            except Exception as e:
                list.append(e)

    texec = ThreadPoolExecutor(**param)
    pexec = ProcessPoolExecutor(**param)
    cexec = CoroutinePoolExecutor(**param, loop=loop)

    tstart = round(time.time()+1)
    input1 = [tstart + i for i in pre_input1]
    input2 = [tstart + i for i in pre_input2]
    input3 = [tstart + i for i in pre_input3]

    for x in input1:
        future = texec.submit(wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, tresult, lock))
    result_iter = texec.map(wake_at, input2)
    for x in input3:
        future = texec.submit(wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, tresult, lock))
    for x in result_iter:
        with lock:
            tresult.append(x)

    texec.shutdown(True)

    pstart = round(time.time() + _start_warm_up)
    input1 = [pstart + i for i in pre_input1]
    input2 = [pstart + i for i in pre_input2]
    input3 = [pstart + i for i in pre_input3]

    for x in input1:
        future = pexec.submit(wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, presult, lock))
    result_iter = pexec.map(wake_at, input2)
    for x in input3:
        future = pexec.submit(wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, presult, lock))
    for x in result_iter:
        with lock:
            presult.append(x)

    pexec.shutdown(True)

    cstart = round(time.time() + _start_warm_up)
    input1 = [cstart + i for i in pre_input1]
    input2 = [cstart + i for i in pre_input2]
    input3 = [cstart + i for i in pre_input3]

    async def async_main():
        for x in input1:
            future = cexec.submit(async_wake_at, x)
            future.add_done_callback(
                functools.partial(result_checker, cresult, lock))
        result_iter = cexec.map(async_wake_at, input2)
        for x in input3:
            future = cexec.submit(async_wake_at, x)
            future.add_done_callback(
                functools.partial(result_checker, cresult, lock))
        async for x in result_iter:
            with lock:
                cresult.append(x)
        await cexec.shutdown(False)

    loop.run_until_complete(async_main())

    try:
        loop.run_until_complete(cexec.shutdown(True))
        texec.shutdown(True)
        pexec.shutdown(True)
    finally:
        loop.close()

    tresult = [round((x - tstart) / _precision) for x in tresult]
    presult = [round((x - pstart) / _precision) for x in presult]
    cresult = [round((x - cstart) / _precision) for x in cresult]

    result = True
    for (t, p, c) in zip(tresult, presult, cresult):
        result = result and (t == p)
        if not result:
            print(tresult)
            print(presult)
            print(cresult)
            print(t,p,c)
            assert False
        result = result and (p == c)
        if not result:
            print(tresult)
            print(presult)
            print(cresult)
            print(t, p, c)
            assert False
        result = result and (c == t)
        if not result:
            print(tresult)
            print(presult)
            print(cresult)
            print(t, p, c)
            assert False
    return result
Example n. 32
def do_test1(workers):
    param = {"max_workers": workers}
    start = round(time.time() + _start_warm_up)
    input = input_generator(workers, start)
    loop = asyncio.new_event_loop()

    lock = threading.Lock()
    tresult = []
    presult = []
    cresult = []

    def result_checker(list, lock, fut):
        with lock:
            try:
                list.append(fut.result())
            except Exception as e:
                list.append(e)

    texec = ThreadPoolExecutor(**param)
    pexec = ProcessPoolExecutor(**param)
    cexec = CoroutinePoolExecutor(**param, loop=loop)

    for x in input:
        future = texec.submit(wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, tresult, lock))

        future = pexec.submit(wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, presult, lock))

        future = cexec.submit(async_wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, cresult, lock))

    texec.shutdown(False)
    pexec.shutdown(False)
    loop.run_until_complete(cexec.shutdown(False))

    try:
        loop.run_until_complete(cexec.shutdown(True))
        texec.shutdown(True)
        pexec.shutdown(True)
    finally:
        loop.close()

    tresult = [round((x - start) / _precision) for x in tresult]
    presult = [round((x - start) / _precision) for x in presult]
    cresult = [round((x - start) / _precision) for x in cresult]

    result = True
    for (t, p, c) in zip(tresult, presult, cresult):
        result = result and (t == p)
        if not result:
            print(tresult)
            print(presult)
            print(cresult)
            print(t, p, c)
            assert False
        result = result and (p == c)
        if not result:
            print(tresult)
            print(presult)
            print(cresult)
            print(t, p, c)
            assert False
        result = result and (c == t)
        if not result:
            print(tresult)
            print(presult)
            print(cresult)
            print(t, p, c)
            assert False
    return result