Example #1
def set_mp_start_method(val=None):
    """Set the multiprocessing start method.

    If the start method has already been applied, will skip.

    Args:
        val (str): Start method to set; defaults to None to use the default
            for the platform. If the given method is not available for the
            platform, the default method will be used instead.

    Returns:
        str: The applied start method.

    """
    if val is None:
        val = config.roi_profile["mp_start"]
    avail_start_methods = mp.get_all_start_methods()
    if val not in avail_start_methods:
        val = avail_start_methods[0]
    try:
        mp.set_start_method(val)
        print("set multiprocessing start method to", val)
    except RuntimeError:
        print("multiprocessing start method already set to {}, will skip"
              .format(mp.get_start_method(False)))
    return val
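
Note: multiprocessing.get_all_start_methods() lists the methods supported on the current platform (typically ['fork', 'spawn', 'forkserver'] on Linux and ['spawn'] on Windows), and set_start_method() raises RuntimeError once a method has been fixed. Below is a minimal, self-contained sketch of the same guard pattern; safe_set_start_method is a hypothetical helper, not part of the example above.

import multiprocessing as mp

def safe_set_start_method(preferred="spawn"):
    # Fall back to the platform default when the preferred method is unavailable.
    methods = mp.get_all_start_methods()
    method = preferred if preferred in methods else methods[0]
    try:
        mp.set_start_method(method)
    except RuntimeError:
        # The start method was already set in this interpreter; keep it.
        method = mp.get_start_method()
    return method

if __name__ == "__main__":
    print(safe_set_start_method())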
Example #2
def try_set_start_method(name: str) -> Optional[mp.context.BaseContext]:
    """Attempt to set the start method for process starting, aka the "actor
    spawning backend".

    If the desired method is not supported this function will error. On
    Windows the only supported option is the ``multiprocessing`` "spawn"
    method. The default on *nix systems is ``trio_run_in_process``.
    """
    global _ctx
    global _spawn_method

    methods = mp.get_all_start_methods()
    if 'fork' in methods:
        # forking is incompatible with ``trio``s global task tree
        methods.remove('fork')

    # no Windows support for trio yet
    if platform.system() != 'Windows':
        methods += ['trio_run_in_process']

    if name not in methods:
        raise ValueError(
            f"Spawn method `{name}` is invalid please choose one of {methods}")
    elif name == 'forkserver':
        _forkserver_override.override_stdlib()
        _ctx = mp.get_context(name)
    elif name == 'trio_run_in_process':
        _ctx = None
    else:
        _ctx = mp.get_context(name)

    _spawn_method = name
    return _ctx
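
For context, mp.get_context(name) returns a context object that mirrors the multiprocessing module's API but is bound to a single start method, so a library can pick a method without mutating the global state that set_start_method() touches. A small illustrative sketch, independent of the example above:

import multiprocessing as mp

def square(x):
    return x * x

if __name__ == "__main__":
    ctx = mp.get_context("spawn")
    # The context exposes the same names as the module: Pool, Process, Queue, ...
    with ctx.Pool(2) as pool:
        print(pool.map(square, range(4)))  # [0, 1, 4, 9]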
Example #3
	def process_images_in_process_pool(self):
		number_of_cpus = -1

		if number_of_cpus == -1:
			processes = None
		else:
			processes = number_of_cpus

		# macOS will crash due to a bug in libdispatch if you don't use 'forkserver'
		context = multiprocessing
		if "forkserver" in multiprocessing.get_all_start_methods():
			context = multiprocessing.get_context("forkserver")

		# pool = context.Pool(processes=processes)
		# pool = multiprocessing.Pool(processes)
		pool = ThreadPool(processes)

		index_th = list(range(3))

		function_parameters = zip(
			index_th,
			# itertools.repeat(label_list),
			# itertools.repeat(known_faces),
		)
		pool.map(thread, function_parameters)
Example #4
    def try_multiprocessing_code(
        self, code, expected_out, the_module, concurrency="multiprocessing"
    ):
        """Run code using multiprocessing, it should produce `expected_out`."""
        self.make_file("multi.py", code)
        self.make_file(".coveragerc", """\
            [run]
            concurrency = %s
            source = .
            """ % concurrency)

        if env.PYVERSION >= (3, 4):
            start_methods = ['fork', 'spawn']
        else:
            start_methods = ['']

        for start_method in start_methods:
            if start_method and start_method not in multiprocessing.get_all_start_methods():
                continue

            out = self.run_command("coverage run multi.py %s" % (start_method,))
            expected_cant_trace = cant_trace_msg(concurrency, the_module)

            if expected_cant_trace is not None:
                self.assertEqual(out, expected_cant_trace)
            else:
                self.assertEqual(out.rstrip(), expected_out)

                out = self.run_command("coverage combine")
                self.assertEqual(out, "")
                out = self.run_command("coverage report -m")

                last_line = self.squeezed_lines(out)[-1]
                self.assertRegex(last_line, r"multi.py \d+ 0 100%")
Example #5
    def __init__(self, env_fns, start_method=None):
        self.waiting = False
        self.closed = False
        n_envs = len(env_fns)

        if start_method is None:
            # Fork is not a thread safe method (see issue #217)
            # but is more user friendly (does not require wrapping the code in
            # an `if __name__ == "__main__":` guard)
            forkserver_available = 'forkserver' in multiprocessing.get_all_start_methods()
            start_method = 'forkserver' if forkserver_available else 'spawn'
        ctx = multiprocessing.get_context(start_method)

        self.remotes, self.work_remotes = zip(*[ctx.Pipe() for _ in range(n_envs)])
        self.processes = []
        for work_remote, remote, env_fn in zip(self.work_remotes, self.remotes, env_fns):
            args = (work_remote, remote, CloudpickleWrapper(env_fn))
            # daemon=True: if the main process crashes, we should not cause things to hang
            process = ctx.Process(target=_worker, args=args, daemon=True)
            process.start()
            self.processes.append(process)
            work_remote.close()

        self.remotes[0].send(('get_spaces', None))
        observation_space, action_space = self.remotes[0].recv()
        VecEnv.__init__(self, len(env_fns), observation_space, action_space)
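
The Pipe/Process handshake used above (the parent keeps one end, the child gets the other, and each side closes its copy of the end it does not use) can be shown in isolation. A minimal sketch; _echo_worker is illustrative, not the _worker the example relies on:

import multiprocessing as mp

def _echo_worker(remote, parent_remote):
    parent_remote.close()  # child drops its copy of the parent's end
    while True:
        cmd, data = remote.recv()
        if cmd == "ping":
            remote.send(("pong", data))
        elif cmd == "close":
            remote.close()
            break

if __name__ == "__main__":
    ctx = mp.get_context("spawn")
    remote, work_remote = ctx.Pipe()
    proc = ctx.Process(target=_echo_worker, args=(work_remote, remote), daemon=True)
    proc.start()
    work_remote.close()  # parent drops its copy of the child's end
    remote.send(("ping", 42))
    print(remote.recv())  # ('pong', 42)
    remote.send(("close", None))
    proc.join()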
Example #6
def process_files(paths,
                  input_folder,
                  output_folder,
                  model,
                  processes,
                  process_fn,
                  tolerance=0.6):
    if processes == -1:
        processes = None

    function_parameters = [
        paths,
        itertools.repeat(input_folder),
        itertools.repeat(output_folder),
        itertools.repeat(model)
    ]
    if process_fn == save_keypoints_video:
        function_parameters.append(itertools.repeat(tolerance))

    if processes == 1:
        for parameters in zip(*function_parameters):
            process_fn(*parameters)
    else:
        context = multiprocessing
        if "forkserver" in multiprocessing.get_all_start_methods():
            context = multiprocessing.get_context("forkserver")
        pool = context.Pool(processes=processes)
        pool.starmap(process_fn, zip(*function_parameters))
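
The zip(...) plus itertools.repeat(...) idiom broadcasts constant arguments across every item, so pool.starmap() unpacks one argument tuple per call. A standalone sketch with a hypothetical tag function:

import itertools
import multiprocessing

def tag(item, label):
    return "%s:%s" % (label, item)

if __name__ == "__main__":
    context = multiprocessing
    if "forkserver" in multiprocessing.get_all_start_methods():
        context = multiprocessing.get_context("forkserver")
    params = zip(["a", "b", "c"], itertools.repeat("x"))
    with context.Pool(2) as pool:
        # Calls tag("a", "x"), tag("b", "x"), tag("c", "x")
        print(pool.starmap(tag, params))  # ['x:a', 'x:b', 'x:c']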
Example #8
    def _validate_config_dependencies(self):
        """
        Validate that config values aren't invalid given other config values
        or system-level limitations and requirements.
        """
        is_executor_without_sqlite_support = self.get("core",
                                                      "executor") not in (
                                                          'DebugExecutor',
                                                          'SequentialExecutor',
                                                      )
        is_sqlite = "sqlite" in self.get('core', 'sql_alchemy_conn')
        if is_executor_without_sqlite_support and is_sqlite:
            raise AirflowConfigException(
                "error: cannot use sqlite with the {}".format(
                    self.get('core', 'executor')))

        if self.has_option('core', 'mp_start_method'):
            mp_start_method = self.get('core', 'mp_start_method')
            start_method_options = multiprocessing.get_all_start_methods()

            if mp_start_method not in start_method_options:
                raise AirflowConfigException("mp_start_method should not be " +
                                             mp_start_method +
                                             ". Possible values are " +
                                             ", ".join(start_method_options))
Example #9
def _get_multiprocessing_context(start_method: Optional[str]) -> BaseContext:
    if start_method is None:
        forkserver_available = 'forkserver' in multiprocessing.get_all_start_methods()
        start_method = 'forkserver' if forkserver_available else 'spawn'
    return multiprocessing.get_context(start_method)
Example #10
    def __init__(self, gym_function: Callable[[], gym.Env], num_processes: int,
                 agents_per_process: int):

        assert num_processes > 0

        self.num_processes = num_processes
        self.agents_per_process = agents_per_process

        self.waiting = False
        forkserver_available = "forkserver" in mp.get_all_start_methods()
        start_method = "forkserver" if forkserver_available else "spawn"
        ctx = mp.get_context(start_method)

        self.remotes, self.work_remotes = zip(
            *[ctx.Pipe() for _ in range(num_processes)])
        self.processes = []
        for work_remote, remote in zip(self.work_remotes, self.remotes):
            args = (work_remote, remote, CloudpickleWrapper(gym_function),
                    agents_per_process)
            process = ctx.Process(target=_worker, args=args, daemon=True)
            process.start()
            self.processes.append(process)
            work_remote.close()

        self.remotes[0].send(("get_spaces", None))
        self.observation_space, self.action_space = self.remotes[0].recv()
Example #11
def processPoolProcessImgs(images2Check, numCPU, model):
    # To multiprocess images for efficiency based on number of CPUs

    # PARAMETERS
    # images2Check      images to check for face locations
    # numCPU            number of CPUs to use; -1 = max
    # model             cnn / hog

    # uses multiprocessing module to stream all images to check to testImg function

    if numCPU == -1:
        processes = None
    else:
        processes = numCPU

    context = multiprocessing
    if "forkserver" in multiprocessing.get_all_start_methods():
        context = multiprocessing.get_context("forkserver")

    pool = context.Pool(processes=processes)

    function_parameters = zip(
        images2Check,
        itertools.repeat(model),
    )

    pool.starmap(testImg, function_parameters)
Example #12
def process_images_in_process_pool(
    images_to_check,
    known_names,
    known_face_encodings,
    number_of_cpus,
    tolerance,
    show_distance,
):
    if number_of_cpus == -1:
        processes = None
    else:
        processes = number_of_cpus

    print(f"Testing {len(images_to_check)} images")
    # macOS will crash due to a bug in libdispatch if you don't use 'forkserver'
    context = multiprocessing
    if "forkserver" in multiprocessing.get_all_start_methods():
        context = multiprocessing.get_context("forkserver")

    pool = context.Pool(processes=processes)

    function_parameters = zip(
        images_to_check,
        itertools.repeat(known_names),
        itertools.repeat(known_face_encodings),
        itertools.repeat(tolerance),
        itertools.repeat(show_distance),
    )

    pool.starmap(test_image, function_parameters)
Example #13
def processPoolProcessImgs(images2Check, knownNames, knownFaces, numCPU,
                           tolerance, showDist):
    # To multiprocess images for efficiency based on number of CPUs

    # PARAMETERS
    # images2Check      images to check for faces
    # knownNames        list of known names
    # knownFaces        list of known face encodings
    # numCPU            number of CPUs to use; -1 = max
    # tolerance         tolerance; lower = stricter
    # showDist          boolean option to show distance

    # uses multiprocessing module to stream all images to check to testImg function

    if numCPU == -1:
        processes = None
    else:
        processes = numCPU

    context = multiprocessing
    if "forkserver" in multiprocessing.get_all_start_methods():
        context = multiprocessing.get_context("forkserver")

    pool = context.Pool(processes=processes)

    function_parameters = zip(images2Check, itertools.repeat(knownNames),
                              itertools.repeat(knownFaces),
                              itertools.repeat(tolerance),
                              itertools.repeat(showDist))

    pool.starmap(testImg, function_parameters)
Example #14
    def multiprocessing_context(self, multiprocessing_context):
        if multiprocessing_context is not None:
            if self.num_workers > 0:
                if isinstance(multiprocessing_context, (str, bytes)):
                    valid_start_methods = mp.get_all_start_methods()
                    if multiprocessing_context not in valid_start_methods:
                        raise ValueError((
                            'multiprocessing_context option '
                            'should specify a valid start method in {!r}, but got '
                            'multiprocessing_context={!r}').format(
                                valid_start_methods, multiprocessing_context))
                    multiprocessing_context = mp.get_context(multiprocessing_context)

                if not isinstance(multiprocessing_context, mp.context.BaseContext):
                    raise TypeError((
                        'multiprocessing_context option should be a valid context '
                        'object or a string specifying the start method, but got '
                        'multiprocessing_context={}').format(multiprocessing_context))
            else:
                raise ValueError(
                    ('multiprocessing_context can only be used with '
                     'multi-process loading (num_workers > 0), but got '
                     'num_workers={}').format(self.num_workers))
        self.__multiprocessing_context = multiprocessing_context
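
The string branch above ends by calling mp.get_context(), which performs its own validation. A short sketch of that behavior using only the standard library:

import multiprocessing as mp

if __name__ == "__main__":
    # Every name reported by get_all_start_methods() yields a context object.
    for name in mp.get_all_start_methods():
        assert isinstance(mp.get_context(name), mp.context.BaseContext)

    # An unknown name raises ValueError, mirroring the setter's own check.
    try:
        mp.get_context("not-a-method")
    except ValueError as exc:
        print(exc)  # cannot find context for 'not-a-method'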
Example #15
    def try_multiprocessing_code_with_branching(self, code, expected_out):
        """Run code using multiprocessing, it should produce `expected_out`."""
        self.make_file("multi.py", code)
        self.make_file("multi.rc", """\
            [run]
            concurrency = multiprocessing
            branch = True
            omit = */site-packages/*
            """)

        if env.PYVERSION >= (3, 4):
            start_methods = ['fork', 'spawn']
        else:
            start_methods = ['']

        for start_method in start_methods:
            if start_method and start_method not in multiprocessing.get_all_start_methods():
                continue

            out = self.run_command("coverage run --rcfile=multi.rc multi.py %s" % (start_method,))
            self.assertEqual(out.rstrip(), expected_out)

            out = self.run_command("coverage combine")
            self.assertEqual(out, "")
            out = self.run_command("coverage report -m")

            last_line = self.squeezed_lines(out)[-1]
            self.assertRegex(last_line, r"multi.py \d+ 0 \d+ 0 100%")
Example #16
def set_blockchain_params(genesis_block):
    assert 'spawn' in multiprocessing.get_all_start_methods(), 'spawn start method not found.'
    setting_tx = genesis_block.txs[0]
    params = bjson.loads(setting_tx.message)
    V.BLOCK_GENESIS_HASH = genesis_block.hash
    V.BLOCK_PREFIX = params.get('prefix')
    V.BLOCK_CONTRACT_PREFIX = params.get('contract_prefix')
    V.BLOCK_GENESIS_TIME = params.get('genesis_time')
    V.BLOCK_ALL_SUPPLY = params.get('all_supply')
    V.BLOCK_TIME_SPAN = params.get('block_span')
    V.BLOCK_REWARD = params.get('block_reward')
    V.CONTRACT_VALIDATOR_ADDRESS = params.get('validator_address')
    V.COIN_DIGIT = params.get('digit_number')
    V.COIN_MINIMUM_PRICE = params.get('minimum_price')
    V.CONTRACT_MINIMUM_AMOUNT = params.get('contract_minimum_amount')
    consensus = params.get('consensus')
    if isinstance(consensus, dict):
        V.BLOCK_CONSENSUS = consensus
        V.BLOCK_BASE_CONSENSUS = min(consensus.keys())
    else:
        # TODO: remove after test
        pow_ratio = params.get('pow_ratio')
        V.BLOCK_CONSENSUS = {C.BLOCK_YES_POW: pow_ratio, C.BLOCK_POS: 100 - pow_ratio}
        V.BLOCK_BASE_CONSENSUS = C.BLOCK_YES_POW
    GompertzCurve.setup_params()
Example #17
    def _setup(self):
        # TODO: use fork context if unix and frozen?
        # if py34+, else fall back
        if hasattr(multiprocessing, "get_context"):
            all_methods = multiprocessing.get_all_start_methods()
            logger.info(
                "multiprocessing start_methods={}".format(",".join(all_methods))
            )
            ctx = multiprocessing.get_context("spawn")
        else:
            logger.info("multiprocessing fallback, likely fork on unix")
            ctx = multiprocessing
        self._multiprocessing = ctx
        # print("t3b", self._multiprocessing.get_start_method())

        # if config_paths was set, read in config dict
        if self._settings.config_paths:
            # TODO(jhr): handle load errors, handle list of files
            config_paths = self._settings.config_paths.split(",")
            for config_path in config_paths:
                config_dict = config_util.dict_from_config_file(config_path)
                if config_dict is None:
                    continue
                if self._config is not None:
                    self._config.update(config_dict)
                else:
                    self._config = config_dict
Example #18
    def __init__(self, config, executor_id, job_id):
        self.config = config
        self.executor_id = executor_id
        self.job_id = job_id
        self.use_threads = not is_unix_system()
        self.num_workers = self.config['lithops'].get('workers', CPU_COUNT)
        self.workers = []

        if self.use_threads:
            self.queue = queue.Queue()
            WORKER = Thread
            WORKER_PROCESS = self._thread_runner
        else:
            if 'fork' in mp.get_all_start_methods():
                mp.set_start_method('fork')
            self.queue = mp.Queue()
            WORKER = mp.Process
            WORKER_PROCESS = self._process_runner

        for worker_id in range(self.num_workers):
            p = WORKER(target=WORKER_PROCESS, args=(worker_id,))
            self.workers.append(p)
            p.start()

        logger.info('ExecutorID {} | JobID {} - Localhost runner started '
                    '- {} workers'.format(self.executor_id,
                                          self.job_id,
                                          self.num_workers))
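
Choosing thread workers on non-Unix systems and process workers elsewhere, while sharing one queue-draining loop, can be sketched standalone (the names below are illustrative, not the lithops internals):

import multiprocessing as mp
import queue
import threading

def drain(q):
    # Pull tasks until the None sentinel arrives.
    while True:
        item = q.get()
        if item is None:
            break
        print("handled", item)

if __name__ == "__main__":
    if 'fork' in mp.get_all_start_methods():
        q, worker_cls = mp.Queue(), mp.Process           # Unix: real processes
    else:
        q, worker_cls = queue.Queue(), threading.Thread  # e.g. Windows
    w = worker_cls(target=drain, args=(q,))
    w.start()
    for i in range(3):
        q.put(i)
    q.put(None)
    w.join()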
Example #19
    def test_done_callback_raises(self) -> None:
        """Future.done() raises Exceptions thrown while processing work?"""
        for method in get_all_start_methods():
            with self.subTest(method=method):
                js = Jobserver(context=method, slots=3)

                # Calling done() repeatedly correctly reports multiple errors
                f = js.submit(fn=len, args=("hello",), timeout=None)
                f.when_done(self.helper_raise, ArithmeticError, "123")
                f.when_done(self.helper_raise, ZeroDivisionError, "45")
                with self.assertRaises(CallbackRaised) as c:
                    f.done(timeout=None)
                self.assertIsInstance(c.exception.__cause__, ArithmeticError)
                with self.assertRaises(CallbackRaised) as c:
                    f.done(timeout=None)
                self.assertIsInstance(c.exception.__cause__, ZeroDivisionError)
                self.assertTrue(f.done(timeout=None))
                self.assertTrue(f.done(timeout=0))

                # After callbacks have completed, the result is available.
                self.assertEqual(f.result(), 5)
                self.assertEqual(f.result(), 5)

                # Now that work is complete, adding callback raises immediately
                with self.assertRaises(CallbackRaised) as c:
                    f.when_done(self.helper_raise, UnicodeError, "67")
                self.assertIsInstance(c.exception.__cause__, UnicodeError)
                self.assertTrue(f.done(timeout=0.0))

                # After callbacks have completed, result is still available.
                self.assertEqual(f.result(), 5)
                self.assertEqual(f.result(), 5)
Example #20
    def _validate_config_dependencies(self):
        """
        Validate that config values aren't invalid given other config values
        or system-level limitations and requirements.
        """
        is_executor_without_sqlite_support = self.get("core",
                                                      "executor") not in (
                                                          'DebugExecutor',
                                                          'SequentialExecutor',
                                                      )
        is_sqlite = "sqlite" in self.get('core', 'sql_alchemy_conn')
        if is_sqlite and is_executor_without_sqlite_support:
            raise AirflowConfigException(
                f"error: cannot use sqlite with the {self.get('core', 'executor')}"
            )
        if is_sqlite:
            import sqlite3

            # Some of the features in storing rendered fields require sqlite version >= 3.15.0
            min_sqlite_version = '3.15.0'
            if StrictVersion(sqlite3.sqlite_version) < StrictVersion(
                    min_sqlite_version):
                raise AirflowConfigException(
                    f"error: cannot use sqlite version < {min_sqlite_version}")

        if self.has_option('core', 'mp_start_method'):
            mp_start_method = self.get('core', 'mp_start_method')
            start_method_options = multiprocessing.get_all_start_methods()

            if mp_start_method not in start_method_options:
                raise AirflowConfigException("mp_start_method should not be " +
                                             mp_start_method +
                                             ". Possible values are " +
                                             ", ".join(start_method_options))
Example #21
    def test_returns_none(self) -> None:
        """None can be returned from a Future?"""
        for method in get_all_start_methods():
            with self.subTest(method=method):
                js = Jobserver(context=method, slots=3)
                f = js.submit(fn=noop, args=(), timeout=None)
                self.assertIsNone(f.result())
Example #22
    def test_raises(self) -> None:
        """Future.result() raises Exceptions thrown while processing work?"""
        for method in get_all_start_methods():
            with self.subTest(method=method):
                # Prepare how callbacks will be observed
                mutable = [0]

                # Prepare work interleaving exceptions and success cases
                js = Jobserver(context=method, slots=3)

                # Confirm exception is raised repeatedly
                f = js.submit(
                    fn=self.helper_raise,
                    args=(ArithmeticError, "message123"),
                    timeout=None,
                )
                f.when_done(self.helper_callback, mutable, 0, 1)
                with self.assertRaises(ArithmeticError):
                    f.result()
                self.assertEqual(mutable[0], 1, "One callback observed")
                f.when_done(self.helper_callback, mutable, 0, 2)
                self.assertEqual(mutable[0], 3, "Callback after done")
                with self.assertRaises(ArithmeticError):
                    f.result()
                self.assertTrue(f.done())
                self.assertEqual(mutable[0], 3, "Callback idempotent")

                # Confirm other work processed without issue
                g = js.submit(fn=str, kwargs=dict(object=2), timeout=None)
                self.assertEqual("2", g.result())
Example #23
    def test_sleep_fn(self) -> None:
        """Confirm sleep_fn(...) invoked and handled per documentation."""
        for method in get_all_start_methods():
            with self.subTest(method=method):
                js = Jobserver(context=method, slots=1)

                # Confirm negative sleep is detectable with fn never called
                with self.assertRaises(AssertionError):
                    js.submit(fn=len, sleep_fn=lambda: -1.0)

                # Confirm sleep_fn(...) returning zero can proceed
                zs = iter(itertools.cycle((0, None)))
                f = js.submit(fn=len,
                              args=((1, ), ),
                              sleep_fn=lambda: next(zs))

                # Confirm sleep_fn(...) returning finite sleep can proceed
                gs = iter(itertools.cycle((0.1, 0.05, None)))
                g = js.submit(fn=len, args=((), ), sleep_fn=lambda: next(gs))

                # Confirm repeated sleeping can cause a timeout to occur.
                # Note fn is never called as sleep_fn vetoes the invocation.
                hs = iter(itertools.cycle((0.1, )))
                with self.assertRaises(Blocked):
                    js.submit(fn=len, sleep_fn=lambda: next(hs), timeout=0.35)

                # Confirm as expected.  Importantly, results not previously
                # retrieved implying above submissions finalized results.
                self.assertEqual(1, f.result())
                self.assertEqual(0, g.result())
Example #24
def fork(loop, cls, *args, **kwargs):
    parent_pipe, child_pipe = multiprocessing.Pipe()
    if threading.active_count() > 1:
        # Cannot use os.fork() on linux when using threads, so we will try
        # instructing the multiprocessing module to use the 'spawn' method
        # instead.
        start_method = multiprocessing.get_start_method(allow_none=True)
        if start_method is None:
            available_start_methods = multiprocessing.get_all_start_methods()
            default_start_method = available_start_methods[0]
            if default_start_method == 'fork':
                multiprocessing.set_start_method("spawn")
        elif start_method == 'fork':
            raise RuntimeError(
                'Cannot fork using start_method "fork" when using threads')
    childpid = os.fork()
    if childpid:
        return Gateway(loop, cls, childpid, parent_pipe)
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    obj = cls(*args, **kwargs)
    obj.pipe = child_pipe
    loop = asyncio.get_event_loop()
    if loop.is_running():
        loop.stop()
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    loop.add_reader(obj.pipe.fileno(), _handle_call, obj)
    loop.run_forever()
    os._exit(0)
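
get_start_method(allow_none=True), as used above, reports the configured method without implicitly fixing one, which is what leaves room to switch to 'spawn'. A short sketch of those semantics:

import multiprocessing

if __name__ == "__main__":
    # None means no method has been configured or implicitly chosen yet.
    print(multiprocessing.get_start_method(allow_none=True))  # None

    multiprocessing.set_start_method("spawn")
    print(multiprocessing.get_start_method())  # 'spawn'
    # A second set_start_method() call would now raise RuntimeError
    # unless force=True is passed.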
Example #25
    def try_multiprocessing_code_with_branching(self, code, expected_out):
        """Run code using multiprocessing, it should produce `expected_out`."""
        self.make_file("multi.py", code)
        self.make_file(
            "multi.rc", """\
            [run]
            concurrency = multiprocessing
            branch = True
            omit = */site-packages/*
            """)

        if env.PYVERSION >= (3, 4):
            start_methods = ['fork', 'spawn']
        else:
            start_methods = ['']

        for start_method in start_methods:
            if start_method and start_method not in multiprocessing.get_all_start_methods():
                continue

            out = self.run_command(
                "coverage run --rcfile=multi.rc multi.py %s" % (start_method,))
            self.assertEqual(out.rstrip(), expected_out)

            out = self.run_command("coverage combine")
            self.assertEqual(out, "")
            out = self.run_command("coverage report -m")

            last_line = self.squeezed_lines(out)[-1]
            self.assertRegex(last_line, r"multi.py \d+ 0 \d+ 0 100%")
Example #26
    def _validate_start_method(self, value):
        available_methods = ["thread"]
        if hasattr(multiprocessing, "get_all_start_methods"):
            available_methods += multiprocessing.get_all_start_methods()
        if value in available_methods:
            return None
        return _error_choices(value, set(available_methods))
Example #27
    def test_multiprocessing_with_branching(self):
        nprocs = 3
        upto = 30
        code = (SQUARE_OR_CUBE_WORK + MULTI_CODE).format(NPROCS=nprocs,
                                                         UPTO=upto)
        total = sum(x * x if x % 2 else x * x * x for x in range(upto))
        expected_out = f"{nprocs} pids, total = {total}"
        self.make_file("multi.py", code)
        self.make_file(
            "multi.rc", """\
            [run]
            concurrency = multiprocessing
            branch = True
            omit = */site-packages/*
            """)

        for start_method in ["fork", "spawn"]:
            if start_method and start_method not in multiprocessing.get_all_start_methods():
                continue

            out = self.run_command(
                f"coverage run --rcfile=multi.rc multi.py {start_method}")
            assert out.rstrip() == expected_out

            out = self.run_command(
                "coverage combine -q")  # sneak in a test of -q
            assert out == ""
            out = self.run_command("coverage report -m")

            last_line = self.squeezed_lines(out)[-1]
            assert re.search(r"TOTAL \d+ 0 \d+ 0 100%", last_line)
Example #28
def start_method_fixture(request):
    """Parameterized fixture to choose the start_method for multiprocessing."""
    start_method = request.param
    if start_method not in multiprocessing.get_all_start_methods():
        # Windows doesn't support "fork".
        pytest.skip(f"start_method={start_method} not supported here")
    return start_method
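
A hedged sketch of how such a fixture is typically declared and consumed with pytest parameterization; the params list and the test below are assumptions, since the snippet omits the original decorator:

import multiprocessing
import pytest

@pytest.fixture(params=["fork", "spawn", "forkserver"])
def start_method(request):
    if request.param not in multiprocessing.get_all_start_methods():
        pytest.skip(f"start_method={request.param} not supported here")
    return request.param

def test_context_builds(start_method):
    ctx = multiprocessing.get_context(start_method)
    assert ctx.get_start_method() == start_method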
Example #29
    def process_images_in_process_pool(self,
                                       images_to_check,
                                       known_face_encodings,
                                       number_of_cpus=4):
        print('Process image: ', images_to_check)
        if number_of_cpus == -1:
            processes = None
        else:
            processes = number_of_cpus

        # macOS will crash due to a bug in libdispatch if you don't use 'forkserver'
        context = multiprocessing.get_context('spawn')
        if "forkserver" in multiprocessing.get_all_start_methods():
            context = multiprocessing.get_context("forkserver")

        pool = context.Pool(processes=processes, maxtasksperchild=100)

        function_parameters = zip(images_to_check,
                                  itertools.repeat(known_face_encodings))

        result = pool.starmap(self.test_image,
                              function_parameters,
                              chunksize=50)
        pool.close()
        pool.join()

        return result
Example #30
    def test_callback_semantics(self) -> None:
        """Inside a Future's callback the Future reports it is done."""
        for method in get_all_start_methods():
            with self.subTest(method=method):
                js = Jobserver(context=method, slots=3)
                f = js.submit(fn=len, args=((1, 2, 3),))
                f.when_done(self.helper_check_semantics, f)
                self.assertEqual(3, f.result())
Example #31
def get_context(start_method):
    '''Get a new multiprocessing context.'''
    if start_method is None:
        # Use a thread-safe method (see issue #217);
        # forkserver is faster than spawn but not always available.
        forkserver_available = 'forkserver' in mp.get_all_start_methods()
        start_method = 'forkserver' if forkserver_available else 'spawn'
    return mp.get_context(start_method)
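
A usage sketch for the helper above, repeated with its import so the snippet runs standalone:

import multiprocessing as mp

def get_context(start_method=None):
    '''Get a new multiprocessing context (same logic as above).'''
    if start_method is None:
        forkserver_available = 'forkserver' in mp.get_all_start_methods()
        start_method = 'forkserver' if forkserver_available else 'spawn'
    return mp.get_context(start_method)

if __name__ == "__main__":
    ctx = get_context()
    print(ctx.get_start_method())  # 'forkserver' on Unix-likes, 'spawn' on Windows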
Example #32
def force_forkserver():
    """
    Forces forkserver multiprocessing mode if not set. This is needed for HPO and CUDA.
    The CUDA runtime does not support the fork start method: either the spawn or forkserver start method is required.
    forkserver is used because spawn is still affected by locking issues.
    """
    if 'forkserver' in multiprocessing.get_all_start_methods() and not is_forkserver_enabled():
        logger.warning('WARNING: changing multiprocessing start method to forkserver')
        multiprocessing.set_start_method('forkserver', force=True)
Example #33
    def test_multiprocessing(self):
        self.make_file("multi.py", """\
            import multiprocessing
            import os
            import time
            import sys

            def func(x):
                # Need to pause, or the tasks go too quick, and some processes
                # in the pool don't get any work, and then don't record data.
                time.sleep(0.02)
                # Use different lines in different subprocesses.
                if x % 2:
                    y = x*x
                else:
                    y = x*x*x
                return os.getpid(), y

            if __name__ == "__main__":
                if len(sys.argv) > 1: multiprocessing.set_start_method(sys.argv[1])
                pool = multiprocessing.Pool(3)
                inputs = range(30)
                outputs = pool.imap_unordered(func, inputs)
                pids = set()
                total = 0
                for pid, sq in outputs:
                    pids.add(pid)
                    total += sq
                print("%d pids, total = %d" % (len(pids), total))
                pool.close()
                pool.join()
            """)

        if env.PYVERSION >= (3, 4):
            start_methods = ['fork', 'spawn']
        else:
            start_methods = ['']

        for start_method in start_methods:
            if start_method and start_method not in multiprocessing.get_all_start_methods():
                continue

            out = self.run_command(
                "coverage run --concurrency=multiprocessing multi.py %s" % start_method
            )
            total = sum(x*x if x%2 else x*x*x for x in range(30))
            self.assertEqual(out.rstrip(), "3 pids, total = %d" % total)

            self.run_command("coverage combine")
            out = self.run_command("coverage report -m")
            last_line = self.squeezed_lines(out)[-1]
            self.assertEqual(last_line, "multi.py 23 0 100%")
Example #34
def get_multiproc_context(capabilities):
    best_concurrency = capabilities.get('Process Startup Method', 'fork')
    if hasattr(multiprocessing, 'get_context'):
        for each in (best_concurrency, 'fork', 'spawn'):
            if hasattr(multiprocessing, 'get_all_start_methods'):
                if each in multiprocessing.get_all_start_methods():
                    return multiprocessing.get_context(each)
            else:
                try:
                    return multiprocessing.get_context(each)
                except ValueError:
                    pass # invalid concurrency for this system
    return None
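
The hasattr() checks above exist for pre-3.4 Pythons, which had neither get_context() nor get_all_start_methods(). On modern Pythons the same fallback chain reduces to a membership test; pick_context below is a hypothetical simplification, not the original helper:

import multiprocessing

def pick_context(preferred='fork'):
    # Try the preferred method first, then the common fallbacks.
    for name in (preferred, 'fork', 'spawn'):
        if name in multiprocessing.get_all_start_methods():
            return multiprocessing.get_context(name)
    return None

if __name__ == "__main__":
    ctx = pick_context('forkserver')
    print(ctx.get_start_method() if ctx else None)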
Example #35
def process_images_in_process_pool(images_to_check, known_names, known_face_encodings, number_of_cpus):
    if number_of_cpus == -1:
        processes = None
    else:
        processes = number_of_cpus

    # macOS will crash due to a bug in libdispatch if you don't use 'forkserver'
    context = multiprocessing
    if "forkserver" in multiprocessing.get_all_start_methods():
        context = multiprocessing.get_context("forkserver")

    pool = context.Pool(processes=processes)
    function_parameters = zip(images_to_check, itertools.repeat(known_names), itertools.repeat(known_face_encodings))

    pool.starmap(test_image, function_parameters)
Example #36
import importlib.machinery
import zipimport
import unittest
import sys
import os
import os.path
import py_compile

from test.script_helper import (
    make_pkg, make_script, make_zip_pkg, make_zip_script,
    assert_python_ok, assert_python_failure, temp_dir,
    spawn_python, kill_python)

# Look up which start methods are available to test
import multiprocessing
AVAILABLE_START_METHODS = set(multiprocessing.get_all_start_methods())

from test import support
verbose = support.verbose

test_source = """\
# multiprocessing includes all sorts of shenanigans to make __main__
# attributes accessible in the subprocess in a pickle compatible way.

# We run the "doesn't work in the interactive interpreter" example from
# the docs to make sure it *does* work from an executed __main__,
# regardless of the invocation mechanism

import sys
import time
from multiprocessing import Pool, set_start_method
Example #37
__author__ = 'fpbatta'

import time

import numpy as np
import sys
import multiprocessing as mp
if __name__ == '__main__':
    #mp.freeze_support()

    print(mp.get_all_start_methods())
    sm = mp.get_start_method()
    print("sm: ", sm)
    mp.set_start_method('forkserver', force=True)
    print("sm 2: ", mp.get_start_method())

    sys.path.append('/home/fpbatta/src/GUI/Plugins')
    sys.path.append('/home/fpbatta/src/GUI/Plugins/multiprocessing_plugin')

    #sys.path.append('/Users/fpbatta/src/GUImerge/GUI/Plugins/multiprocessing_plugin')
    from multiprocessing_plugin.multiprocessing_plugin import MultiprocessingPlugin


    m = MultiprocessingPlugin()

    m.startup(20000.)
    m.bufferfunction(np.random.random((11,1000)))

    for i in range(100):
        m.bufferfunction(200. * np.random.random((11,1000)))
        time.sleep(0.05)
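
The force=True flag used above is what permits re-selecting a start method after one is already set; without it the second call raises. A minimal sketch:

import multiprocessing as mp

if __name__ == "__main__":
    mp.set_start_method("spawn")
    try:
        mp.set_start_method("forkserver")  # refused: already set
    except RuntimeError as exc:
        print("refused:", exc)             # context has already been set
    if "forkserver" in mp.get_all_start_methods():
        mp.set_start_method("forkserver", force=True)
        print(mp.get_start_method())       # 'forkserver'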
Example #38
import sys
import signal
import unittest
import threading
import multiprocessing

from concurrent.futures import TimeoutError

import pebble
from pebble import ProcessPool, ProcessExpired


# set start method
supported = False

if sys.version_info.major > 2:
    methods = multiprocessing.get_all_start_methods()
    if 'spawn' in methods:
        try:
            multiprocessing.set_start_method('spawn')

            if multiprocessing.get_start_method() == 'spawn':
                supported = True
        except RuntimeError:  # child process
            pass


initarg = 0


def initializer(value):
    global initarg
Example #39
        self.loop.run_until_complete(do_acquire())
        p.start()
        self.lock.release()
        out = q.get(timeout=5)
        p.join()
        self.assertTrue(isinstance(out, bool))


class SpawnLockMixingTest(LockMixingTest):
    def setUp(self):
        super().setUp()
        context = get_context('spawn')
        self.lock = aioprocessing.AioLock(context=context)


if 'forkserver' in get_all_start_methods():
    class ForkServerLockMixingTest(LockMixingTest):
        def setUp(self):
            super().setUp()
            context = get_context('forkserver')
            self.lock = aioprocessing.AioLock(context=context)


class SemaphoreTest(BaseTest):
    def setUp(self):
        super().setUp()
        self.sem = aioprocessing.AioSemaphore(2)

    def _test_semaphore(self, sem):
        self.assertReturnsIfImplemented(2, get_value, sem)
        self.assertEqual(True, sem.acquire())