def test_child_retrieves_resource_tracker(self):
    """Check that workers share the parent's resource_tracker process.

    First compares the tracker pid seen by a worker with the parent's.
    Then, in a fresh interpreter, registers a temp file in the parent and
    un-registers it from a child: if the two processes did not share one
    resource_tracker, a cache KeyError would be printed on stderr.
    """
    parent_rtracker_pid = get_rtracker_pid()
    executor = ProcessPoolExecutor(max_workers=2)
    child_rtracker_pid = executor.submit(get_rtracker_pid).result()

    # First simple pid retrieval check (see #200)
    assert child_rtracker_pid == parent_rtracker_pid

    # Register a resource in the parent process, and un-register it in the
    # child process. If the two processes do not share the same
    # resource_tracker, a cache KeyError should be printed in stderr.
    import subprocess
    cmd = '''if 1:
        import os, sys
        from loky import ProcessPoolExecutor
        from loky.backend import resource_tracker
        from loky.backend.semlock import SemLock
        from tempfile import NamedTemporaryFile

        tmpfile = NamedTemporaryFile(delete=False)
        tmpfile.close()
        filename = tmpfile.name
        resource_tracker.VERBOSE = True

        resource_tracker.register(filename, "file")

        def maybe_unlink(name, rtype):
            # resource_tracker.maybe_unlink is actually a bound method of the
            # ResourceTracker. We need a custom wrapper to avoid object
            # serialization.
            from loky.backend import resource_tracker
            resource_tracker.maybe_unlink(name, rtype)

        print(filename)
        e = ProcessPoolExecutor(1)
        e.submit(maybe_unlink, filename, "file").result()
        e.shutdown()
    '''
    try:
        p = subprocess.Popen([sys.executable, '-E', '-c', cmd],
                             stderr=subprocess.PIPE,
                             stdout=subprocess.PIPE)
        # communicate() instead of wait() + manual reads: calling wait()
        # while stdout/stderr are PIPEs can deadlock if the child fills an
        # OS pipe buffer before exiting. communicate() drains both pipes
        # concurrently and closes them for us.
        out, err = p.communicate()
        # The child prints exactly one line: the temp file name.
        filename = out.decode('utf-8').strip()
        err = err.decode('utf-8')

        # The verbose tracker logs the un-registration performed in the
        # child worker; any KeyError or leak report means the child used a
        # different tracker than the parent.
        pattern = "decremented refcount of file %s" % filename
        assert pattern in err
        assert "leaked" not in err

        pattern = "KeyError: '%s'" % filename
        assert pattern not in err
    finally:
        executor.shutdown()
def test_child_retrieves_resource_tracker(self):
    """Check that workers share the parent's resource_tracker process.

    First compares the tracker pid seen by a worker with the parent's.
    Then, in a fresh interpreter, registers a folder name in the parent
    and un-registers it from a child: if the two processes did not share
    one resource_tracker, a cache KeyError would be printed on stderr.
    """
    parent_rtracker_pid = get_rtracker_pid()
    executor = ProcessPoolExecutor(max_workers=2)
    child_rtracker_pid = executor.submit(get_rtracker_pid).result()

    # First simple pid retrieval check (see #200)
    assert child_rtracker_pid == parent_rtracker_pid

    # Register a resource in the parent process, and un-register it in the
    # child process. If the two processes do not share the same
    # resource_tracker, a cache KeyError should be printed in stderr.
    import subprocess
    folder_name = 'loky_tempfolder'
    cmd = '''if 1:
        import os, sys
        from loky import ProcessPoolExecutor
        from loky.backend import resource_tracker
        from loky.backend.semlock import SemLock

        resource_tracker.VERBOSE=True
        folder_name = "{}"

        # We don't need to create the semaphore as registering / unregistering
        # operations simply add / remove entries from a cache, but do not
        # manipulate the actual semaphores.
        resource_tracker.register(folder_name, "folder")

        def unregister(name, rtype):
            # resource_tracker.unregister is actually a bound method of the
            # ResourceTracker. We need a custom wrapper to avoid object
            # serialization.
            from loky.backend import resource_tracker
            resource_tracker.unregister(folder_name, rtype)

        e = ProcessPoolExecutor(1)
        e.submit(unregister, folder_name, "folder").result()
        e.shutdown()
    '''
    try:
        p = subprocess.Popen(
            [sys.executable, '-E', '-c', cmd.format(folder_name)],
            stderr=subprocess.PIPE)
        # communicate() instead of wait() + read(): wait() with a PIPEd
        # stderr can deadlock if the child fills the OS pipe buffer before
        # exiting. communicate() drains and closes the pipe for us.
        _, err = p.communicate()
        err = err.decode('utf-8')

        # The verbose tracker must log the un-registration done in the
        # child; a KeyError or leak report would mean the child used a
        # different tracker than the parent.
        assert re.search("unregister %s" % folder_name, err) is not None
        assert re.search("leaked", err) is None
        assert re.search("KeyError: '%s'" % folder_name, err) is None
    finally:
        executor.shutdown()
def test_worker_timeout_mock(self):
    """Workers idling past the (tiny) timeout must trigger the
    'worker stopped' UserWarning while results keep flowing.

    A DelayedSimpleQueue is swapped in as the result queue so that
    results arrive slower than the worker timeout.
    """
    ctx = get_context()
    worker_timeout = .001
    executor = ProcessPoolExecutor(
        max_workers=4, context=ctx, timeout=worker_timeout)

    # Delay result delivery so workers look idle long enough to time out.
    delayed_queue = DelayedSimpleQueue(ctx=ctx, delay=.001)
    executor._result_queue = delayed_queue

    with pytest.warns(UserWarning,
                      match=r'^A worker stopped while some jobs'):
        # Repeated map calls re-spawn workers for lazy executor
        # implementations; consuming the iterator forces completion.
        for _ in range(5):
            list(executor.map(id, range(8)))

    executor.shutdown()
    delayed_queue.close()
def target():
    """Pump values from q_in through a process pool into q_out.

    NOTE(review): all names used here (q_in, q_out, stop_event, counter,
    transform, args, kwargs, n_workers, SourceExhausted, StopEvent,
    Semaphore, ProcessPoolExecutor) are free variables presumably bound by
    an enclosing scope not visible in this chunk — confirm against the
    surrounding definition.
    """
    def done(future: Future):
        # Completion callback: forward the result downstream and mark the
        # corresponding q_in item as processed. Runs for every submitted
        # future, success or failure.
        try:
            q_out.put(future.result())
            q_in.task_done()
        except BaseException:
            # Any failure (including the task's own exception re-raised by
            # future.result()) signals every other worker to stop.
            stop_event.set()
            raise
        finally:
            # Always release the back-pressure slot, even on failure, so
            # the submit loop below cannot deadlock on counter.acquire().
            counter.release()

    # start worker
    executor = ProcessPoolExecutor(n_workers)
    # Semaphore bounds the number of in-flight submissions to n_workers,
    # providing back-pressure against q_in.
    counter = Semaphore(n_workers)
    wait = True
    try:
        # iter(q_in.get, SourceExhausted()) pulls items until the sentinel
        # compares equal — SourceExhausted presumably implements __eq__;
        # verify in its definition.
        for value in iter(q_in.get, SourceExhausted()):
            counter.acquire()  # wait for a free slot before submitting
            executor.submit(transform, value, *args, **kwargs).add_done_callback(done)
        # wait for other processes
        # task_done() here accounts for the sentinel item consumed by the
        # iter() above; join() then blocks until every submitted item has
        # been marked done by the callback, before forwarding the sentinel.
        q_in.task_done()
        q_in.join()
        q_out.put(SourceExhausted())
    except StopEvent:
        # Cooperative shutdown requested elsewhere — exit quietly.
        pass
    except BaseException:
        # On any other failure: don't wait for pending work, flag the stop
        # event for the rest of the pipeline, and propagate.
        wait = False
        stop_event.set()
        raise
    finally:
        executor.shutdown(wait=wait)