Example #1
    def execute(self, run_id):
        # look up the run object for this run_id
        # (module-level imports of logger, process_pool, pipeline_info,
        # and command_info are assumed but not shown in this excerpt)
        with self.get_run(run_id) as run:
            logger.info("Run ID: %s", run_id)
            # for each exec_group in that run ...
            for exec_group in run.get_exec_groups():
                # ... create a process pool
                with process_pool.ProcessPool(run) as pool:
                    # clean-up is delegated to the ProcessPool of the
                    # last exec_group
                    if exec_group == run.get_exec_groups()[-1]:
                        logger.info("Telling pipeline to clean up!")
                        pool.clean_up_temp_paths()

                    for poc in exec_group.get_pipes_and_commands():
                        # for each pipe or command (poc)
                        # check if it is a pipeline ...
                        if isinstance(poc, pipeline_info.PipelineInfo):
                            # ... create a pipeline ...
                            with pool.Pipeline(pool) as pipeline:
                                for command in poc.get_commands():
                                    pipeline.append(
                                        command.get_command(),
                                        stdout_path=command.get_stdout_path(),
                                        stderr_path=command.get_stderr_path())
                        elif isinstance(poc, command_info.CommandInfo):
                            pool.launch(poc.get_command(),
                                        stdout_path=poc.get_stdout_path(),
                                        stderr_path=poc.get_stderr_path())
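
The process_pool module these examples drive is never shown in this listing. Purely as a sketch of the surface Example #1 assumes (the launch method and the context-manager protocol come from the calls above; the body, and subprocess as the backing mechanism, are guesses), a minimal ProcessPool could look like:

import subprocess


class ProcessPool(object):
    # sketch: collect launched child processes and wait for all of
    # them when the with-block exits
    def __init__(self, run):
        self.run = run
        self._procs = []
        self._files = []

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        for proc in self._procs:
            proc.wait()          # block until every child has finished
        for f in self._files:
            f.close()

    def launch(self, command, stdout_path=None, stderr_path=None):
        stdout = open(stdout_path, 'wb') if stdout_path else None
        stderr = open(stderr_path, 'wb') if stderr_path else None
        self._files += [f for f in (stdout, stderr) if f]
        self._procs.append(
            subprocess.Popen(command, stdout=stdout, stderr=stderr))

Pipeline() and clean_up_temp_paths() from Example #1 would sit on the same bookkeeping and are omitted here.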
Example #2
    def _finished_tasks(self):
        # never launch more workers than there are files to process
        if len(self._file_list) < self._worker_size:
            self._worker_size = len(self._file_list)
        self.log.info("Number of processes: {}".format(self._worker_size))
        # one (line_call, result_suffix, file) tuple per input file
        process_args = [(self._line_call, self._result_suffix, i)
                        for i in self._file_list]
        with process_pool.ProcessPool(size=self._worker_size,
                                      target=self.file_part_handler,
                                      iterable=process_args,
                                      logger=self.log,
                                      p_bar=True) as pool:
            self.return_code = pool.run()
            self.err_msg = pool.err_msg
        self._merge_output_file()
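
Example #2's constructor (size, target, iterable) reads like a thin wrapper around a worker pool's map over per-file argument tuples. If process_pool is not available, the same fan-out can be sketched with the standard library; file_part_handler, the tuple layout, and the file names below are placeholders mirroring the example, not its real implementation:

import multiprocessing


def file_part_handler(args):
    # unpack the same (line_call, result_suffix, file) tuples the
    # example builds as process_args
    line_call, result_suffix, path = args
    print("processing %s -> *%s" % (path, result_suffix))


if __name__ == '__main__':
    file_list = ["part_a.txt", "part_b.txt", "part_c.txt"]
    worker_size = min(4, len(file_list))   # never more workers than files
    process_args = [("line_call", ".result", f) for f in file_list]
    with multiprocessing.Pool(worker_size) as pool:
        pool.map(file_part_handler, process_args)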
Example #3
    def _create_process_pool(self):
        # settings["process_num"]: 0 disables the pool entirely, a
        # positive value is used as-is, and a negative value means
        # "auto-detect from the machine's core count"
        if self.settings["process_num"] == 0:
            proc_count = 0
        elif self.settings["process_num"] > 0:
            proc_count = self.settings["process_num"]
        else:
            # auto-detect: leave two cores free on larger machines,
            # but always keep at least one worker
            proc_count = multiprocessing.cpu_count()
            if proc_count > 3:
                proc_count = proc_count - 2
            else:
                proc_count = 1

        _LOGGER.info("process_pool_size = %d", proc_count)

        if proc_count > 0:
            # imported lazily: only needed when a pool is actually created
            import process_pool as pp
            return pp.ProcessPool(proc_count)
        else:
            return None
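
The auto-detect branch of Example #3 encodes a common heuristic: use every core, but keep a couple free so the host stays responsive, and never drop below one worker. Extracted into a standalone helper (the reserve of 2 and the cutoff are simply the values Example #3 uses):

import multiprocessing


def auto_process_count(reserve=2, minimum=1):
    # leave `reserve` cores free on larger machines; fall back to
    # `minimum` so small hosts still get a worker
    cores = multiprocessing.cpu_count()
    if cores > reserve + 1:
        return cores - reserve
    return minimum


print(auto_process_count())   # e.g. 6 on an 8-core machine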
Example #4
import process_pool
import time


def benchmark(data_chunk):
    # CPU-bound busy work: 2**100000000 is an integer with roughly
    # thirty million digits, so each task pins a core for a while
    a = 2 ** data_chunk
    return "Done"


if __name__ == '__main__':
    # perf_counter measures wall-clock time, which is what we want
    # here: process_time would ignore work done in the pool's children
    start_time = time.perf_counter()
    pool = process_pool.ProcessPool()
    results = pool.map(benchmark, [100000000] * 52)
    print("results:", results)
    print("elapsed time:", time.perf_counter() - start_time)