def apply_async(self, target, args=None, kwargs=None, callbacks=None,
                errbacks=None, accept_callback=None, **compat):
    args = args or []
    kwargs = kwargs or {}
    callbacks = callbacks or []
    errbacks = errbacks or []
    on_ready = curry(self.on_ready, callbacks, errbacks)

    self.logger.debug("ThreadPool: Apply %s (args:%s kwargs:%s)" % (
        target, args, kwargs))

    req = WorkRequest(do_work, (target, args, kwargs, on_ready,
                                accept_callback))
    self._pool.putRequest(req)
    # threadpool also has callback support, but for some reason the
    # callback is not triggered before you've collected the results.
    # Clear the results (if any), so the queue doesn't grow too large.
    self._pool._results_queue.queue.clear()
    return req
def run(self, *ignored):
    while True:
        try:
            message = self.factory.receive(self.destination,
                                           self.wait_interval)
            self.logger.log(TRACE1, "Message received [%s]" %
                            str(message).decode("utf-8"))

            work_request = WorkRequest(self.handler.handle, [message])
            self.handlers_pool.putRequest(work_request)

            try:
                self.handlers_pool.poll()
            except NoResultsPending:
                pass

        except NoMessageAvailableException:
            self.logger.log(TRACE1, "Consumer did not receive a message. %s" %
                            self._get_destination_info())

        except WebSphereMQJMSException as e:
            self.logger.error(
                "%s in run, e.completion_code=[%s], e.reason_code=[%s]" % (
                    e.__class__.__name__, e.completion_code, e.reason_code))
            raise
def send_stream_request(self, identifier, fsource, name='file', size=None,
                        stream_type=None):
    """Starts a file transfer.

    For Slack, the size and stream_type are unsupported."""
    stream = Stream(identifier, fsource, name, size, stream_type)
    log.debug("Requesting upload of {0} to {1} (size hint: {2}, stream type: {3})"
              .format(name, identifier.channelname, size, stream_type))
    self.thread_pool.putRequest(WorkRequest(self._slack_upload, args=(stream,)))
    return stream
def ga_filter(self, msg, cmd, args, dry_run):
    """
    :param msg: The original chat message.
    :param cmd: The command name itself.
    :param args: Arguments passed to the command.
    :param dry_run: True when this is a dry-run. Dry-runs are performed
        by certain commands (such as !help) to check whether a user is
        allowed to perform a command if they were to issue it. If dry_run
        is True, the plugin shouldn't actually do anything beyond
        returning whether the command is authorized or not.
    """
    logging.getLogger('requests.packages.urllib3').setLevel(logging.DEBUG)
    try:
        if 'collect' in self and self['collect']:
            if 'ga-cid' not in self:
                self['ga-cid'] = str(uuid.uuid4())
            session = requests.Session()
            session.headers['User-Agent'] = 'Errbot/%s' % version.VERSION
            tracker = Tracker('UA-82261413-1',
                              client_id=self['ga-cid'],
                              requests_session=session)
            event = Event(category='commands',
                          action=cmd,
                          label=self._bot.all_commands[cmd].__self__.namespace,
                          value=1)
            self._bot.thread_pool.putRequest(WorkRequest(tracker.send, [event]))
    except Exception:
        self.log.exception('Command tracking failed.')
    return msg, cmd, args
def run_prod():
    cycle_count = 1
    main = ThreadPool(num_workers=PARSER_THREAD_COUNT)
    while True:
        ADMIN_LOGGER.info("Starting cycle : " + str(cycle_count))
        reload(P_ROOT)
        process_list = [[e, __import__(P_ROOT.__name__ + '.' + e + '.main',
                                       fromlist=e)]
                        for e in P_ROOT.__all__]
        process_dict = dict(process_list)
        ADMIN_LOGGER.info("Executing process list : " +
                          str(process_dict.items()))
        for proc_name in process_dict.keys():
            # default to None (not the string 'None') if the module
            # has no Parser attribute
            proc = getattr(process_dict.get(proc_name), 'Parser', None)
            main.putRequest(WorkRequest(proc_runner, args=(1, proc),
                                        callback=None))
            ADMIN_LOGGER.info("Started thread : " + proc_name)
        try:
            main.poll()
        except NoResultsPending:
            pass
        except Exception:
            ADMIN_LOGGER.error(traceback.format_exc())
        main.wait()
        ADMIN_LOGGER.info("Sleeping for default LISTING_SLEEP_TIME : " +
                          str(GLOBAL_SLEEP_TIME))
        time.sleep(GLOBAL_SLEEP_TIME)
        cycle_count = 1 if cycle_count > 9999 else cycle_count + 1
def addWorker(self, callback, *args, **kwargs):
    """Add a worker to the thread pool. Uses Python's own thread pool
    instead of relying on Tornado's asynchronous mode.
    """
    request = WorkRequest(callback, args, kwargs)
    self.putRequest(request)
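# A minimal usage sketch for the addWorker pattern above. MyPool and fetch
# are hypothetical names, not part of the original code; only ThreadPool,
# WorkRequest, putRequest and wait come from the threadpool library.
import threadpool

class MyPool(threadpool.ThreadPool):
    def addWorker(self, callback, *args, **kwargs):
        # wrap the callable plus its arguments and hand them to the pool
        self.putRequest(threadpool.WorkRequest(callback, args, kwargs))

def fetch(url, timeout=5):
    print("fetching %s (timeout=%s)" % (url, timeout))

pool = MyPool(4)                                   # four worker threads
pool.addWorker(fetch, "http://example.com", timeout=10)
pool.wait()                                        # block until all requests finish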
def add_work_unit(self, name, cmd):
    if self.is_work_unit_waiting(name):
        # we create the request
        arg = {'cmd': cmd, 'package': name}
        req = WorkRequest(self.do_execute,
                          [arg],
                          None,
                          callback=self.result_callback,
                          exc_callback=self.handle_exception)
        # then we put the work request in the queue...
        self.set_work_unit_status(name, 'queued')
        self.pool.putRequest(req)
def translationWord(allWords):
    def translate(baseWord, word):
        chinese = translateFromWord(word)
        allWords[baseWord] = chinese

    requests = [WorkRequest(translate, args=(baseWord, word))
                for baseWord, word in allWords.items()]

    pool = ThreadPool(num_workers=8)
    for req in requests:
        pool.putRequest(req)
    try:
        # poll(True) blocks until NoResultsPending signals completion
        pool.poll(True)
    except NoResultsPending:
        pool.wait()
def _process_command(self, mess, cmd, args, match):
    """Process and execute a bot command"""

    # first it must go through the command filters
    mess, cmd, args = self._process_command_filters(mess, cmd, args, False)
    if mess is None:
        log.info("Command %s blocked or deferred." % cmd)
        return

    frm = mess.frm
    username = frm.person
    user_cmd_history = self.cmd_history[username]

    log.info("Processing command '{}' with parameters '{}' from {}".format(
        cmd, args, frm))

    if (cmd, args) in user_cmd_history:
        user_cmd_history.remove((cmd, args))  # Avoids duplicate history items

    f = self.re_commands[cmd] if match else self.commands[cmd]

    if f._err_command_admin_only and self.bot_config.BOT_ASYNC:
        # If it is an admin command, wait until the queue is completely depleted so
        # we don't have strange concurrency issues on load/unload/updates etc...
        self.thread_pool.wait()

    if f._err_command_historize:
        # add it to the history only if it is authorized to be so
        user_cmd_history.append((cmd, args))

    # Don't check for None here as None can be a valid argument to str.split.
    # '' was chosen as default argument because this isn't a valid argument to str.split()
    if not match and f._err_command_split_args_with != '':
        try:
            if hasattr(f._err_command_split_args_with, "parse_args"):
                args = f._err_command_split_args_with.parse_args(args)
            elif callable(f._err_command_split_args_with):
                args = f._err_command_split_args_with(args)
            else:
                args = args.split(f._err_command_split_args_with)
        except Exception as e:
            self.send_simple_reply(
                mess, "Sorry, I couldn't parse your arguments. {}".format(e))
            return

    if self.bot_config.BOT_ASYNC:
        wr = WorkRequest(self._execute_and_send,
                         [],
                         {'cmd': cmd,
                          'args': args,
                          'match': match,
                          'mess': mess,
                          'template_name': f._err_command_template})
        self.thread_pool.putRequest(wr)
        if f._err_command_admin_only:
            # Again, if it is an admin command, wait until the queue is completely
            # depleted so we don't have strange concurrency issues.
            self.thread_pool.wait()
    else:
        self._execute_and_send(cmd=cmd, args=args, match=match, mess=mess,
                               template_name=f._err_command_template)
def put(self, callable_, args=None, kwds=None):
    self.pool.putRequest(WorkRequest(callable_, args=args, kwds=kwds))
def send_stream_request(self, identifier, fsource, name='file', size=None,
                        stream_type=None):
    """Starts a file transfer.

    :param identifier: TelegramPerson or TelegramMUCOccupant
        Identifier of the Person or Room to send the stream to.
    :param fsource: str, dict or binary data
        File URL or binary content from a local file.
        Optionally a dict with binary content plus metadata can
        be given. See `stream_type` for more details.
    :param name: str, optional
        Name of the file. Not sure if this works always.
    :param size: str, optional
        Size of the file obtained with os.path.getsize.
        This is only used for debug logging purposes.
    :param stream_type: str, optional
        Type of the stream. Choices: 'document', 'photo', 'audio',
        'video', 'sticker', 'location'.

        If 'video', a dict is optional as {'content': fsource, 'duration': str}.
        If 'voice', a dict is optional as {'content': fsource, 'duration': str}.
        If 'audio', a dict is optional as {'content': fsource, 'duration': str,
            'performer': str, 'title': str}.
        For 'location' a dict is mandatory as {'latitude': str, 'longitude': str}.
        For 'venue': TODO  # see: https://core.telegram.org/bots/api#sendvenue

    :return stream: str or Stream
        If `fsource` is str will return str, else return Stream.
    """
    def _telegram_metadata(fsource):
        if isinstance(fsource, dict):
            return fsource.pop('content'), fsource
        else:
            # empty dict (not None) so the **meta expansion below is safe
            return fsource, {}

    def _is_valid_url(url):
        try:
            from urlparse import urlparse  # Python 2
        except ImportError:
            from urllib.parse import urlparse  # Python 3
        return bool(urlparse(url).scheme)

    content, meta = _telegram_metadata(fsource)
    if isinstance(content, str):
        if not _is_valid_url(content):
            raise ValueError("Not a valid URL: {}".format(content))
        self._telegram_special_message(chat_id=identifier.id,
                                       content=content,
                                       msg_type=stream_type,
                                       **meta)
        log.debug("Requesting upload of {0} to {1} (size hint: {2}, stream type: {3})"
                  .format(name, identifier.username, size, stream_type))
        stream = content
    else:
        stream = Stream(identifier, content, name, size, stream_type)
        log.debug("Requesting upload of {0} to {1} (size hint: {2}, stream type: {3})"
                  .format(name, identifier, size, stream_type))
        self.thread_pool.putRequest(
            WorkRequest(self._telegram_upload_stream, args=(stream,)))
    return stream
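# A hypothetical call sketch for send_stream_request above, based on the
# dict shapes described in its docstring; `backend` and `person` are
# assumptions standing in for a Telegram backend instance and a
# TelegramPerson identifier from the surrounding codebase. Because
# fsource's 'content' is a URL string, the call returns the URL rather
# than a Stream.
url = backend.send_stream_request(
    person,
    {'content': 'https://example.com/clip.mp4', 'duration': '10'},
    name='clip.mp4',
    stream_type='video')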
def _enqueue_flow(self, flow):
    with self._lock:
        if flow not in self.in_flight:
            self.in_flight.append(flow)
            self._pool.putRequest(WorkRequest(self.execute, args=(flow,)))
"""This f'n, as a callback, is blocking. It blocks the whole program, regardless of number of processes or CPUs/cores""" print('handleOutput got: %r, %r' % (request, output)) outputs.append(output) #print('pausing') #for i in xrange(100000000): # pass if __name__ == '__main__': ncpus = processing.cpuCount() nthreads = ncpus + 1 print('ncpus: %d, nthreads: %d' % (ncpus, nthreads)) pool = ThreadPool(nthreads) # create a threading pool t0 = time.time() #arr = np.random.random(10000000) #for i, val in enumerate([1000000000]*10):#range(10): for i in range(10): args = (i, 1000000000) print('queueing task %d' % i) request = WorkRequest(f, args=args, callback=handleOutput) # these requests will only multithread if f is a C extension call?? definitely don't multithread if f is pure Python pool.putRequest(request) print('done queueing tasks') pool.wait() print('tasks took %.3f sec' % time.time()) print('outputs: %r' % outputs) time.sleep( 2 ) # pause so you can watch the parent thread in taskman hang around after worker threads exit
def register_task(client_exe_path, msg):
    request = WorkRequest(thread_work,
                          (client_exe_path, msg),
                          {},
                          callback=thread_work_cb,
                          exc_callback=handle_exception)
    thread_pool.putRequest(request)
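# A self-contained sketch of the callback/exc_callback pattern used by
# register_task above. The worker and handler names (run_client, on_done,
# on_error) are assumptions for illustration; only ThreadPool, WorkRequest,
# putRequest and wait come from the threadpool library.
import threadpool

def run_client(client_exe_path, msg):
    # stand-in worker: pretend to invoke the client and return a status
    return "ran %s with %r" % (client_exe_path, msg)

def on_done(request, result):
    # invoked from poll()/wait() in the calling thread with the worker's result
    print("request %s finished: %s" % (request.requestID, result))

def on_error(request, exc_info):
    # exc_info is the (type, value, traceback) tuple raised by the worker
    print("request %s failed: %s" % (request.requestID, exc_info[1]))

pool = threadpool.ThreadPool(2)
pool.putRequest(threadpool.WorkRequest(run_client,
                                       ("/usr/bin/client", "hello"),
                                       {},
                                       callback=on_done,
                                       exc_callback=on_error))
pool.wait()  # process results, firing the callbacks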