def runInThread(cls, worker_fun, worker_fun_args, on_thread_finish=None, on_thread_exception=None, skip_raise_exception=False):
    """ Run a function inside a thread.
    :param worker_fun: reference to function to be executed inside a thread
    :param worker_fun_args: arguments passed to a thread function
    :param on_thread_finish: function to be called after thread finishes its execution
    :param on_thread_exception: function called with the exception raised inside 'worker_fun'
        (takes precedence over re-raising; see note below)
    :param skip_raise_exception: Exception raised inside the 'worker_fun' will be passed to
        the calling thread if:
        - on_thread_exception is a valid function (it's exception handler)
        - skip_raise_exception is False
    :return: reference to a thread object
    """

    def on_thread_finished_int(thread_arg, on_thread_finish_arg, skip_raise_exception_arg, on_thread_exception_arg):
        # Runs when the worker thread signals 'finished': dispatch either the
        # exception path or the success callback.
        if thread_arg.worker_exception:
            if on_thread_exception_arg:
                # Dedicated handler wins; the exception is NOT re-raised here.
                on_thread_exception_arg(thread_arg.worker_exception)
            else:
                if not skip_raise_exception_arg:
                    # Re-raise in the thread that receives the 'finished' signal.
                    raise thread_arg.worker_exception
        else:
            if on_thread_finish_arg:
                on_thread_finish_arg()

    if threading.current_thread() != threading.main_thread():
        # starting thread from another thread causes an issue of not passing arguments'
        # values to on_thread_finished_int function, so on_thread_finish is not called
        st = traceback.format_stack()
        print('Running thread from inside another thread. Stack: \n' + ''.join(st))

    thread = WorkerThread(worker_fun=worker_fun, worker_fun_args=worker_fun_args)

    # in Python 3.5 local variables sometimes are removed before calling on_thread_finished_int
    # so we have to bind that variables with the function ref
    bound_on_thread_finished = partial(on_thread_finished_int, thread, on_thread_finish, skip_raise_exception, on_thread_exception)

    thread.finished.connect(bound_on_thread_finished)
    # Keep a reference in the class-level pool while the thread runs so it is
    # not garbage-collected mid-execution; remove it again on finish.
    thread.started.connect(lambda: cls._global_thread_pool.append(thread))
    thread.finished.connect(lambda: cls._global_thread_pool.remove(thread))
    thread.start()
    return thread
def doThreadedWork(runtime, scriptName, queryResults, restEndpoint, userContext, numberOfThreads):
	"""Fan work out to a pool of worker threads and wait for them to drain it.

	Spins up ``numberOfThreads`` WorkerThread instances that consume from a
	shared queue, feeds every entry of ``queryResults`` into that queue, then
	waits for the queue to drain and for all threads to shut down gracefully.

	Fix over the previous version: the two bare ``except:`` clauses were
	narrowed to ``except Exception:`` so that KeyboardInterrupt/SystemExit are
	no longer silently swallowed (the inner one would even ``break`` out of
	the retry loop without any record of why).

	:param runtime: job runtime context; provides .jobName, .logger, .setError()
	:param scriptName: name of the job script, passed through to each worker
	:param queryResults: iterable of work items (e.g. endpoints) to process
	:param restEndpoint: REST endpoint handle, passed through to each worker
	:param userContext: user/session context, passed through to each worker
	:param numberOfThreads: worker-thread count (coerced with int())
	:return: None
	"""
	try:
		jobThreads = []
		jobName = runtime.jobName
		## Queue used to transfer endpoints to worker threads
		inputQueue = Queue()

		## Spin up the worker threads
		runtime.logger.info('Spinning up {numberOfJobThreads!r} threads for job {jobName!r}', numberOfJobThreads=numberOfThreads, jobName=jobName)
		for i in range(int(numberOfThreads)):
			activeThread = WorkerThread(runtime, scriptName, inputQueue, restEndpoint, userContext)
			activeThread.start()
			jobThreads.append(activeThread)

		## Load ITDM data set into a shared queue for the worker threads
		for result in queryResults:
			while True:  # loop for the Queue_Full catch
				try:
					inputQueue.put(result, True, .1)
					break
				except Queue_Full:
					## If the queue is full, wait for the workers to pull data
					## off and free up room; no need to break here. Note: this
					## is blocking; we may want to convert into asynchronous
					## thread/defered, to avoid parent control/breaks.
					continue
				except Exception:
					## Narrowed from a bare 'except:' so Ctrl-C / SystemExit
					## still propagate; unexpected errors are recorded and we
					## give up on this item.
					runtime.setError(__name__)
					break

		## Wait for the threads
		## NOTE(review): an empty queue only means all items were *taken*, not
		## necessarily finished; the is_alive() loop below covers completion.
		while not inputQueue.empty():
			time.sleep(3)

		## Tell all idle threads (threads not currently running a job on
		## an endpoint) to gracefully shut down
		runtime.logger.info('Spinning down active threads for job {jobName!r}', jobName=jobName)
		for jobThread in jobThreads:
			jobThread.completed = True

		## Ensure job threads finish properly, before removing references
		runtime.logger.info('Waiting for threads to clean up...')
		while True:
			updatedJobThreads = []
			for jobThread in jobThreads:
				if (jobThread is not None and jobThread.is_alive()):
					updatedJobThreads.append(jobThread)
			jobThreads = updatedJobThreads
			if len(jobThreads) > 0:
				## I don't want to 'join' the threads, but should replace
				## the blocking sleep with something that can be interrupted
				time.sleep(.5)
			else:
				break
		runtime.logger.info('Threads finished.')

	except Exception:
		## Narrowed from a bare 'except:' — record the failure on the runtime
		## but let KeyboardInterrupt/SystemExit propagate to the caller.
		runtime.setError(__name__)

	## end doThreadedWork
	return