def base_case(self):
    """
    Thread subclass (``self.klass``) runs and deposits its result on the queue.

    Starts one worker thread, waits for it, then verifies it pushed exactly
    one item (the sentinel value 7) onto the shared queue.
    """
    queue = Queue()
    t = self.klass(queue=queue)
    t.start()
    t.join()
    # Plain asserts instead of nose's deprecated eq_/ok_ helpers, matching
    # the assert style used elsewhere in the suite.
    assert queue.get(block=False) == 7
    assert queue.empty()
def base_case(self):
    """
    A worker thread built from ``self.klass`` should leave exactly 7 behind.

    Spins up a single thread with a shared queue, waits for completion, and
    checks the queue holds the sentinel value and nothing else.
    """
    bucket = Queue()
    worker = self.klass(queue=bucket)
    worker.start()
    worker.join()
    produced = bucket.get(block=False)
    assert produced == 7
    assert bucket.empty()
def base_case(self):
    """
    ``EHThread`` wrapping ``self.worker`` runs and deposits its result.

    Starts one exception-handling thread around the worker target, waits for
    it, then verifies it pushed exactly one item (the sentinel value 7) onto
    the shared queue.
    """
    queue = Queue()
    t = EHThread(target=self.worker, args=[queue])
    t.start()
    t.join()
    # Plain asserts instead of nose's deprecated eq_/ok_ helpers, matching
    # the assert style used elsewhere in the suite.
    assert queue.get(block=False) == 7
    assert queue.empty()
def base_case(self):
    """
    An ``EHThread`` running ``self.worker`` should leave exactly 7 behind.

    Spins up a single wrapped thread with a shared queue, waits for
    completion, and checks the queue holds the sentinel value and nothing
    else.
    """
    bucket = Queue()
    worker = EHThread(target=self.worker, args=[bucket])
    worker.start()
    worker.join()
    produced = bucket.get(block=False)
    assert produced == 7
    assert bucket.empty()
def run(self, *args, **kwargs):
    """
    Execute the given task concurrently on every connection in this group.

    One ``ExceptionHandlingThread`` is spawned per member connection; each
    thread runs ``thread_worker`` and reports its result (or captured
    exception) back through a shared queue / the thread wrapper.

    :returns: a `GroupResult` mapping each connection to its result.
    :raises GroupException:
        if any thread raised; the exception carries the partial results,
        with failing connections mapped to their exception values.
    """
    results = GroupResult()
    queue = Queue()
    # One worker thread per member connection.
    threads = [
        ExceptionHandlingThread(
            target=thread_worker,
            kwargs=dict(cxn=cxn, queue=queue, args=args, kwargs=kwargs),
        )
        for cxn in self
    ]
    for thread in threads:
        thread.start()
    # TODO: configurable join timeout
    # TODO: (in sudo's version) configurability around interactive
    # prompting resulting in an exception instead, as in v1
    for thread in threads:
        thread.join()
    # Drain successful results; safe to poll non-blocking since every
    # producer thread has already been joined.
    while not queue.empty():
        # TODO: io-sleep? shouldn't matter if all threads are now joined
        cxn, result = queue.get(block=False)
        # TODO: outstanding musings about how exactly aggregate results
        # ought to ideally operate...heterogenous obj like this, multiple
        # objs, ??
        results[cxn] = result
    # Collect exceptions recorded on the thread wrappers themselves.
    # TODO: in a non-thread setup, this would differ, e.g.:
    # - a queue if using multiprocessing
    # - some other state-passing mechanism if using e.g. coroutines
    # - ???
    saw_exception = False
    for thread in threads:
        wrapper = thread.exception()
        if wrapper is None:
            continue
        # Outer kwargs is Thread instantiation kwargs, inner is kwargs
        # passed to thread target/body.
        failed_cxn = wrapper.kwargs['kwargs']['cxn']
        results[failed_cxn] = wrapper.value
        saw_exception = True
    if saw_exception:
        raise GroupException(results)
    return results
def manual_threading_works_okay(self):
    """
    Sanity-check that hand-rolled threading over connections behaves.

    Splits the local words file into one chunk per connection, has each
    thread read its chunk via a shell command, and verifies no thread
    polluted another's stored output.
    """
    # TODO: needs https://github.com/pyinvoke/invoke/issues/438 fixed
    # before it will reliably pass
    skip()
    # Kind of silly but a nice base case for "how would someone thread this
    # stuff; and are there any bizarre gotchas lurking in default
    # config/context/connection state?"
    # Specifically, cut up the local (usually 100k's long) words dict into
    # per-thread chunks, then read those chunks via shell command, as a
    # crummy "make sure each thread isn't polluting things like stored
    # stdout" sanity test
    queue = Queue()
    # TODO: skip test on Windows or find suitable alternative file
    with codecs.open(_words, encoding='utf-8') as fd:
        data = [x.strip() for x in fd.readlines()]
    threads = []
    num_words = len(data)
    # Floor division: plain / returns a float on Python 3, which would blow
    # up the slice indices below.
    chunksize = len(data) // len(self.cxns)
    last = len(self.cxns) - 1
    for i, cxn in enumerate(self.cxns):
        start = i * chunksize
        # Each thread gets exactly one chunk; the final chunk absorbs any
        # remainder left over by the floor division. (Previously this used
        # max(start + chunksize, num_words), which handed every thread the
        # entire tail of the file.)
        end = num_words if i == last else start + chunksize
        chunk = data[start:end]
        kwargs = dict(
            queue=queue,
            cxn=cxn,
            start=start,
            num_words=num_words,
            count=len(chunk),
            expected=chunk,
        )
        thread = ExceptionHandlingThread(target=_worker, kwargs=kwargs)
        threads.append(thread)
    for t in threads:
        t.start()
    for t in threads:
        t.join(5)  # Kinda slow, but hey, maybe the test runner is hot
    while not queue.empty():
        cxn, result, expected = queue.get(block=False)
        for resultword, expectedword in zip_longest(result, expected):
            err = u"({2!r}, {3!r}->{4!r}) {0!r} != {1!r}".format(
                resultword,
                expectedword,
                cxn,
                expected[0],
                expected[-1],
            )
            assert resultword == expectedword, err