def _transfer(self, files, action):
    # Enqueue
    queue = Queue()
    for file in files:
        os.access(file, os.R_OK)
        queue.put((file, 0))
    workers = []
    failed_files = []
    result = dict()
    self._failed_files_lock = Lock()
    # Start worker threads
    for n in range(min(self._pool, len(files))):
        worker = Thread(name="Worker-%s" % n, target=self._worker,
                        args=(action, queue, result, failed_files))
        self._logger.debug("Starting worker '%s'", worker.getName())
        worker.start()
        workers.append(worker)
    # Join workers
    for worker in workers:
        worker.join()
        self._logger.debug("Worker '%s' finished", worker.getName())
    if failed_files:
        raise TransferError("Cannot process several files: %s" % ", ".join(failed_files))
    self._logger.info("Transfer complete!")
    # Return a tuple with the result for every file
    self._logger.debug('Transfer result: %s', (result,))
    return tuple(result[file] for file in files)
def spawn_threads(mongo_res):
    try:
        start = int(datetime.now().timestamp() * 1000 - float(Config.SLIDING_WINDOW) * 1000)
        end = int(datetime.now().timestamp() * 1000)
        interval_list = generate_time_intervals(start, int(Config.SLIDING_WINDOW_PIECE) * 1000, end)
        data_list1 = []
        for lst in interval_list:
            data = get_data_in_interval(mongo_res, lst[0], lst[1])
            data_list1.append(get_result_table(data))
        print("Interval list: ", interval_list)
        print("Data list1: ", data_list1)
        var_thread = Thread(target=variance_method, args=(data_list1, ))
        var_thread.daemon = True
        var_thread.setName("Variance Method thread")
        slide_thread = Thread(target=sliding_window, args=(data_list1, ))
        slide_thread.daemon = True
        slide_thread.setName("Sliding Window thread")
        var_thread.start()
        slide_thread.start()
        print("Threads started: " + var_thread.getName() + ", " + slide_thread.getName())
    except BaseException as e:
        print("Error in spawn_threads")
        print(e)
def quickSort(arr, low, high):
    if low < high:
        p = partition(arr, low, high)
        # Pass the function and its arguments separately; calling
        # quickSort(...) here would run it synchronously on this thread
        # and hand Thread a None target.
        t1 = Thread(target=quickSort, args=(arr, low, p - 1))
        t2 = Thread(target=quickSort, args=(arr, p + 1, high))
        t1.start()
        t2.start()
        t1.join()
        print t1.getName()
        t2.join()
        print t2.getName()
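A minimal, self-contained sketch of the target-versus-call distinction that bites snippets like the one above (the work function is invented for illustration):

from threading import Thread

def work(label):
    print("running", label)

# Wrong: work("a") executes immediately on the calling thread, and
# Thread receives its return value (None) as the target.
t_wrong = Thread(target=work("a"))

# Right: pass the callable and its arguments separately, so work()
# runs on the new thread.
t_right = Thread(target=work, args=("b",))
t_right.start()
t_right.join()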
def _handle_connect(self, conn_event):
    """
    Prepares and starts a new thread for a new Bittrex connection.

    :param conn_event: Contains the connection object.
    :type conn_event: ConnectEvent
    """
    thread = Thread(target=self._init_connection, args=(conn_event.conn_obj, ))
    self.threads[thread.getName()] = thread
    conn_event.conn_obj.assign_thread(thread.getName())
    self.connections.update({conn_event.conn_obj.id: conn_event.conn_obj})
    thread.start()
def run_async(func, *args, **kwargs):
    """
    run_async(func)
        function decorator, intended to make "func" run in a separate
        thread (asynchronously).
        Returns the created Thread object

        E.g.:
        @run_async
        def task1():
            do_something

        @run_async
        def task2():
            do_something_too

        t1 = task1()
        t2 = task2()
        ...
        t1.join()
        t2.join()
    """
    from threading import Thread
    worker = Thread(target=func, args=args, kwargs=kwargs)
    __workersByName[worker.getName()] = worker
    worker.start()
    # TODO: attach post-func decorator to target function and remove
    # thread from __workersByName
    return worker
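getName() and setName() are legacy aliases for the name property, formally deprecated since Python 3.10. A sketch of the same helper in the modern spelling, with a plain module-level dict standing in for __workersByName:

from threading import Thread

workers_by_name = {}  # hypothetical registry, mirroring __workersByName

def run_async(func, *args, **kwargs):
    worker = Thread(target=func, args=args, kwargs=kwargs)
    workers_by_name[worker.name] = worker  # .name replaces getName()
    worker.start()
    return worker

t = run_async(print, "hello from", "a worker thread")
t.join()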
def interval_host(host, time, f, *args, **kwargs):
    '''
    Creates an Event attached to the *host* that will execute the *f*
    function every *time* seconds.

    See example in :ref:`sample_inter`

    :param Proxy host: proxy of the host. Can be obtained from inside a
        class with ``self.host``.
    :param int time: seconds for the intervals.
    :param func f: function to be called every *time* seconds.
    :param list args: arguments for *f*.
    :return: :class:`Event` instance of the interval.
    '''
    def wrap(*args, **kwargs):
        thread = currentThread()
        args = list(args)
        stop_event = args[0]
        del args[0]
        args = tuple(args)
        while not stop_event.is_set():
            f(*args, **kwargs)
            stop_event.wait(time)
        host.detach_interval(thread_id)

    t2_stop = Event()
    args = list(args)
    args.insert(0, t2_stop)
    args = tuple(args)
    t = Thread(target=wrap, args=args, kwargs=kwargs)
    t.start()
    thread_id = t.getName()
    host.attach_interval(thread_id, t2_stop)
    return t2_stop
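The stop-event idiom above also works without the actor host; a stand-alone sketch of a cancellable interval using only the standard library (interval and tick are illustrative names):

import time
from threading import Event, Thread

def interval(period, fn, *args):
    stop = Event()
    def loop():
        while not stop.is_set():
            fn(*args)
            stop.wait(period)  # wakes early when stop is set
    Thread(target=loop, daemon=True).start()
    return stop  # caller cancels with stop.set()

def tick():
    print("tick")

stop = interval(0.5, tick)
time.sleep(2)   # let a few ticks happen
stop.set()      # cancel the interval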
def init(self, callback=None, timeout=10):
    if callback is None:
        return self._init()
    t = Thread(target=self._init, args=[callback, timeout])
    t.setName('dd-downloader-init-' + t.getName())
    t.setDaemon(True)
    t.start()
def download(self):
    if DEBUG:
        print "http-sdownload: download()"
    self.cancelled = False
    if self.downloader.picker.am_I_complete():
        self.downloader.downloads.remove(self)
        return
    self.index = self.downloader.picker.next(haveall, self._want, self)
    if self.index is None:
        self.resched(0.01)
        return
    if (self.index is None and not self.endflag
            and not self.downloader.peerdownloader.has_downloaders()):
        self.endflag = True
        self.index = self.downloader.picker.next(haveall, self._want, self)
    if self.index is None:
        self.endflag = True
        self.resched()
    else:
        self.url = (self.seedurl + '&piece=' + str(self.index))
        self._get_requests()
        if self.request_size < self.downloader.storage._piecelen(self.index):
            self.url += '&ranges=' + self._request_ranges()
        rq = Thread(target=self._request)
        rq.setName("HoffmanHTTPDownloader" + rq.getName())
        rq.setDaemon(True)
        rq.start()
        self.active = True
def spawnThreads(phases, program):
    queues = [Queue() for _ in phases]
    queues.append(queues[0])  # for wraparound
    names = [chr(ord("A") + i) for i in range(len(phases))]
    debug = Queue()
    setDebugStream(lambda *args: debug.put(args))
    Thread(name="Debug Printer", target=debugPrinter, args=[debug]).start()
    threads = []
    for phase, input, output, name in zip(phases, queues, queues[1:], names):
        input.put(phase)
        a = IntCode(program, input=input.get, output=output.put, name=name)
        thread = Thread(name=name, target=a.run)
        threads.append(thread)
        thread.start()
        print("Spawned", name, "with", phase)
    # initial input
    queues[0].put(0)
    for thread in threads:
        print("joining", thread.getName())
        thread.join()
    got = queues[0].get()
    print("Joined all for perm", phases, "and got", got)
    return got
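A stripped-down sketch of the queue-pipeline wiring used above: each stage reads from one queue and writes to the next through the bound get/put methods, just as IntCode does (the stage logic is invented):

from queue import Queue
from threading import Thread

q1, q2, q3 = Queue(), Queue(), Queue()

def stage(get, put, label):
    value = get()        # blocking read from the upstream queue
    put(value + 1)       # hand the incremented value downstream
    print(label, "done")

Thread(name="A", target=stage, args=(q1.get, q2.put, "A")).start()
Thread(name="B", target=stage, args=(q2.get, q3.put, "B")).start()

q1.put(0)                     # seed the pipeline
print("result:", q3.get())    # prints 2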
def start1(path, threads=1):
    # spawn exactly `threads` workers, one at a time
    for i in range(threads):
        th = Thread(target=begin, name=i, args=(path, ))
        th.start()
        th.join()
        print(th.getName(), '\n')
        time.sleep(1)
def check_balance(Device):
    data = ""
    while True:
        q = queue.Queue()
        t = Thread(target=lambda queue, Device: queue.put(
                       Device.status["provider"].balance(Device)),
                   args=(q, Device, ))
        Device.status["cThread"].append(t)
        if thread_processing.check_ready(t.getName(), Device.status["cThread"]):
            pass
        t.start()
        t.join()
        data = q.get()
        Device.status["cThread"].pop(0)
        if data != "" and data is not None:
            break
    if data["data"]["balance"] == "":
        query = ("update sim set sim.check = \"False\" where sim_number = \""
                 + data["phone_number"] + "\";")
        get_info(query)
    else:
        query = ("update sim set sim.check = \"checked\", balance = "
                 + data["data"]["balance"] + ", expire_date = \""
                 + data["data"]["date"] + "\" where sim_number = \""
                 + data["phone_number"] + "\";")
        get_info(query)
    return data
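The Queue in the snippet above exists only to carry a return value out of the worker thread. A sketch of the same round trip with the standard library's concurrent.futures, which hands back results (and re-raises worker exceptions) directly; fetch_balance is a stand-in:

from concurrent.futures import ThreadPoolExecutor

def fetch_balance(device_id):
    # stand-in for Device.status["provider"].balance(Device)
    return {"device": device_id, "balance": 42}

with ThreadPoolExecutor(max_workers=1) as pool:
    future = pool.submit(fetch_balance, "sim-0")
    data = future.result()  # blocks, like start() + join() + q.get()
print(data)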
def rerequest(self, s, callback):
    # ProxyService_
    #
    doe_mode = self.config.get('doe_mode', 0)
    if doe_mode == DOE_MODE_PRIVATE:
        if DEBUG:
            print >> sys.stderr, "Rerequester: rerequest exited."  # + str(doe_mode)
        return
    else:
        if DEBUG:
            print >> sys.stderr, "Rerequester: rerequest did not exit"  # + str(doe_mode)
    #
    # _ProxyService

    if not self.lock.isfinished():  # still waiting for prior cycle to complete??
        def retry(self=self, s=s, callback=callback):
            self.rerequest(s, callback)
        self.sched(retry, 5)  # retry in 5 seconds
        return
    self.lock.reset()
    rq = Thread(target=self._rerequest, args=[s, callback])
    rq.setName("TrackerRerequestA" + rq.getName())
    # Arno: make this a daemon thread so the client closes sooner.
    rq.setDaemon(True)
    rq.start()
def __init__(self, host='', port=58858):
    self.port = port
    self.handler = ThreadedHttpRequestHandler
    # load gps icon into handler memory
    self.iconhref = '/gps.png</href>'
    d = abspath(workdir + '/rc/gps.png')
    f = open(d, 'rb')
    self.handler.iconPng = f.read()
    f.close()
    # load plugin.html into handler memory
    d = abspath(workdir + '/rc/plugin.html')
    f = open(d, 'r')
    self.handler.plugin = f.read()
    f.close()
    # load gps.kml template into self memory
    d = abspath(workdir + '/rc/gps.kml')
    f = open(d, 'r')
    self.gps_template = f.read()
    f.close()
    # push gps.kml into handler memory
    self.updateKML()  # self.gps
    self.server = HTTPServer((host, port), self.handler)
    server_thread = Thread(target=self.server.serve_forever, name='kmld-thread')
    server_thread.setDaemon(True)
    server_thread.start()
    print "Server loop running in thread:", server_thread.getName()
def run(self):
    # Build the target url list. The data we need is a rank, so we don't
    # need everything; the top 100 is enough. Each page holds 20 entries,
    # so pages 1 through 5 cover it.
    target_urls = [self.start_url.format(i) for i in range(1, 6)]
    # Traverse the target url list and put every url on the url queue.
    for url in target_urls:
        qiDianNovelUrlQueue.put(url)
    # Parse every url and put each page element on qiDianNovelElementQueue.
    for i in range(3):
        t = Thread(target=self.parse_list_urls)
        t.setDaemon(True)
        t.setName("qiDianUrlParse{}".format(i))
        t.start()
        novellogger.info(
            "thread named {} begins to parse novel urls".format(t.getName()))
    # start the data handler
    time.sleep(5)
    qiDian_data_handler()
    for q in [qiDianNovelElementQueue, qiDianNovelUrlQueue, qiDianNovelDataQueue]:
        q.join()
def data_hander_and_save(html):
    # Normalize the html string into the format we need.
    new_html = _html_hander(html)
    videologger.info("converted the html text into the needed format")
    # Get the elements to extract data from via XPath.
    elements = _get_element(new_html)
    videologger.info("got elements for xpath successfully")
    # Build the data dicts.
    _get_data(elements)
    # Start three threads that check whether the data is already saved
    # and save whatever is new.
    thread_list = []
    for i in range(3):
        T = Thread(target=_check_finger_and_save_data)
        # set the thread name
        T.setName("videoDataHander{}".format(i))
        thread_list.append(T)
    for t in thread_list:
        # Daemonize so the workers follow the main thread, then start.
        t.setDaemon(True)
        t.start()
        videologger.info(
            "thread named {} checks and saves data".format(t.getName()))
    # Join the queue so the main thread won't stop until the queue is empty.
    videoDataQueue.join()
    if videoDataQueue.empty():
        videologger.info("video data queue is empty, the spiders will stop")
def _is_order_queue(self):
    if self.order_queue is None:
        self.order_queue = queue.Queue()
        thread = Thread(target=self._start_order_queue)
        thread.daemon = True
        self.threads[thread.getName()] = thread
        thread.start()
def _handle_connect(self, conn_event):
    """
    Prepares and starts a new thread for a new Bittrex connection.

    :param conn_event: Contains the connection object.
    :type conn_event: ConnectEvent
    """
    thread = Thread(target=self._init_connection, args=(conn_event.conn_obj,))
    self.threads[thread.getName()] = thread
    conn_event.conn_obj.assign_thread(thread.getName())
    self.connections.update({conn_event.conn_obj.id: conn_event.conn_obj})
    try:
        thread.start()
    except WebSocketConnectionClosedException as err:
        print(err)
        print('Received in _handle_connect. Report to github.')
def rerequest(self, s, callback):
    # ProxyService_
    #
    proxy_mode = self.config.get('proxy_mode', 0)
    if DEBUG:
        if proxy_mode == PROXY_MODE_PRIVATE:
            if True:
                print "_rerequest exited."  # + str(proxy_mode)
            return
        else:
            if True:
                print "_rerequest did not exit"  # + str(proxy_mode)
    #
    # _ProxyService

    if not self.lock.isfinished():  # still waiting for prior cycle to complete??
        def retry(self=self, s=s, callback=callback):
            self.rerequest(s, callback)
        self.sched(retry, 5)  # retry in 5 seconds
        return
    self.lock.reset()
    rq = Thread(target=self._rerequest, args=[s, callback])
    rq.setName("TrackerRerequestA" + rq.getName())
    # Arno: make this a daemon thread so the client closes sooner.
    rq.setDaemon(True)
    rq.start()
def download(self):
    if DEBUG:
        print 'http-sdownload: download()'
    if self.is_frozen_by_helper():
        if DEBUG:
            print 'http-sdownload: blocked, rescheduling'
        self.resched(1)
        return
    self.cancelled = False
    if self.downloader.picker.am_I_complete():
        self.downloader.downloads.remove(self)
        return
    self.index = self.downloader.picker.next(haveall, self._want, self)
    if self.index is None and self.frozen_by_helper:
        self.resched(0.01)
        return
    if self.index is None and not self.endflag and not self.downloader.peerdownloader.has_downloaders():
        self.endflag = True
        self.index = self.downloader.picker.next(haveall, self._want, self)
    if self.index is None:
        self.endflag = True
        self.resched()
    else:
        self.url = self.seedurl + '&piece=' + str(self.index)
        self._get_requests()
        if self.request_size < self.downloader.storage._piecelen(self.index):
            self.url += '&ranges=' + self._request_ranges()
        rq = Thread(target=self._request)
        rq.setName('HoffmanHTTPDownloader' + rq.getName())
        rq.setDaemon(True)
        rq.start()
        self.active = True
def run_async(func, *args, **kwargs):
    from threading import Thread
    worker = Thread(target=func, args=args, kwargs=kwargs)
    __workersByName[worker.getName()] = worker
    worker.start()
    return worker
def register(self, taskFun, *taskArgs, **taskKwargs):
    """
    Register a task to be run in parallel and return the thread name

    :param func taskFun: the function to be run in parallel with the other registered tasks.
    :param taskArgs: the positional arguments of the registered function.
    :param taskKwargs: the keyword arguments of the registered function.
    :return: the thread name
    :rtype: String

    >>> multiTasks = self.surepayManager.multiTasksManager
    >>> threadName = multiTasks.register(function1, arg1)
    >>> multiTasks.register(function2, arg1, arg2)
    """
    # Record the task index
    kwargs = {self.__class__.TASK_INDEX: self.taskIndex}
    # Add this task to the task list
    taskThread = Thread(target=self._runTask, args=(taskFun, taskArgs, taskKwargs), kwargs=kwargs)
    self.tasks.append(taskThread)
    threadName = taskThread.getName()
    debugMsg = "Register task: task - %s, index - %d, thread name - %s, args - %s, kwargs - %s" \
               % (taskFun.__name__, self.taskIndex, threadName, taskArgs, taskKwargs)
    LOGGER.trace(debugMsg)
    self.taskIndex += 1
    return threadName
def _is_order_queue(self):
    if self.order_queue is None:
        self.order_queue = queue.Queue()
        thread = Thread(target=self._start_order_queue, name='BittrexOrderQueueProcessor')
        thread.daemon = True
        self.threads[thread.getName()] = thread
        thread.start()
def onOpenDest(self, event=None, openFile=False):
    # open Destination
    item = self.standardDetails.getData()
    state = item.get('ds')
    if state:
        dest = state.get_download().get_dest_dir()
        if openFile:
            destfiles = state.get_download().get_dest_files()
            if len(destfiles) == 1:
                dest = destfiles[0][1]
        if sys.platform == 'darwin':
            dest = 'file://%s' % dest
        print >> sys.stderr, dest
        complete = True
        # check if destination exists
        assert dest is not None and os.access(dest, os.R_OK), 'Could not retrieve destination'
        try:
            t = Thread(target=open_new, args=(str(dest),))
            t.setName("FilesOpenNew" + t.getName())
            t.setDaemon(True)
            t.start()
        except:
            print_exc()
    elif DEBUG:
        print >> sys.stderr, 'GUIUtil: onOpenFileDest failed: no torrent selected'
def download_metadata(self, item):
    print "download_metadata thread:", Thread.getName(self), item
    while True:
        if self.waitDownload.qsize() > WAIT_DOWNLOAD:
            print "start dealing with waitDownload queue, size", self.waitDownload.qsize()
            while self.waitDownload.qsize() > 0:
                address, binhash, hashtype = self.waitDownload.get()
                if hashtype == "announce":
                    self.announce_semaphore.acquire()
                    t = threading.Thread(
                        target=downloadTorrent.download_metadata,
                        args=(address, binhash, hashtype, self, 600))
                    # Download threads are not capped here, so the thread
                    # count can grow without bound.
                    t.setDaemon(True)
                    t.start()
                else:
                    self.semaphore.acquire()
                    t = threading.Thread(
                        target=downloadTorrent.download_metadata,
                        args=(address, binhash, hashtype, self, 300))
                    # Download threads are not capped here, so the thread
                    # count can grow without bound.
                    t.setDaemon(True)
                    t.start()
        else:
            sleep(5)
def place(self, canvas):
    self.model.create_target(canvas)
    t = Thread(target=lambda: self.model.start_control())
    t.setName('control_loop_' + str(Plotter.canvas_index))
    t.start()
    print(str(t.getName()) + ' has been created')
    Plotter.canvas_index += 1
    self.active_models.append(t)
def aggregate_senddata(self, query):
    url = self.aggregate_forward + '?' + query
    if self.aggregate_password is not None:
        url += '&password=' + self.aggregate_password
    rq = Thread(target=self._aggregate_senddata, args=[url])
    rq.setName('AggregateSendData' + rq.getName())
    rq.setDaemon(True)
    rq.start()
def log_on_threads(message, name_prefix, count):
    for i in range(int(count)):
        name = '%s %d' % (name_prefix, i + 1)
        thread = Thread(name=name, target=logger.info,
                        args=['%s says <i>%s</i>.' % (name, message)],
                        kwargs={'html': True})
        thread.start()
        threads[thread.getName()] = thread
def start(self, user_data):
    """Starts the loading and showing of the activated view

    Typically called by Views.activate, but sometimes called through
    other means"""
    self.log.debug('Meta.user_data: %s', self.meta.user_data)
    self.log.debug('User Data: %s', user_data)
    if "return_view" in user_data.keys():
        self.return_view = user_data["return_view"]
    if user_data and 'user_data' not in user_data.keys():
        self.meta.update({'user_data': user_data})
    elif user_data and 'user_data' in user_data.keys():
        self.meta.update(user_data)
    self.log.debug('Meta.user_data after update: %s', self.meta.user_data)
    if self.meta.has_action:  # pylint: disable=no-member
        self.log.debug("Has Action User Data: %s\n\tMeta Data: %s", user_data, dir(self.meta))
        action_class = getattr(self.app.actions, self.meta.action_class)
        run_action = getattr(action_class, self.meta.action_method)
    else:
        self.set_view_body()
        self.show_header()
        self.show_body()
        self.show_footer()
        if self.meta.view_type == 'StaticNoInput':
            self.set_focus('footer')
        else:
            self.set_focus('body')
    if self.meta.is_threaded:
        self.log.debug('This View has a threaded action')
        action_thread = Thread(target=run_action, args=[self.meta])
        self.log.debug('action_thread.getName(): %s', action_thread.getName())
        if self.meta.thread_ids:
            self.meta.thread_ids.append(action_thread.getName())
        else:
            self.meta.thread_ids = [action_thread.getName()]
        action_thread.start()
        self.set_view_body()
        self.show_header()
        self.show_body()
        self.show_footer()
def predownload(self, callback, timeout=10):
    if self.lock.locked():
        # a download is already in flight; nothing to do here
        return
    t = Thread(target=self._predownload, args=[callback, timeout])
    t.setName('dd-downloader-predownload-' + t.getName())
    t.setDaemon(True)
    t.start()
def run(self) -> None:
    while True:
        url = self.queue.get()
        try:
            print("thread {} picked up a task and is crawling".format(Thread.getName(self)))
            GetImg(url, self.path)
        finally:
            self.queue.task_done()
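A self-contained sketch of the get()/task_done()/join() worker-pool pattern this run() participates in, with a trivial payload in place of GetImg:

import queue
from threading import Thread

tasks = queue.Queue()

def worker():
    while True:
        item = tasks.get()
        try:
            print("processing", item)  # stand-in for GetImg(url, path)
        finally:
            tasks.task_done()  # lets tasks.join() account for this item

for _ in range(3):
    Thread(target=worker, daemon=True).start()

for url in ["a", "b", "c", "d"]:
    tasks.put(url)
tasks.join()  # blocks until every put() item has been task_done()'d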
def threads(n=10):
    t = None
    for i in range(0, n):
        name = 'Test%d' % i
        t = Thread(name=name, target=main)
        t.setDaemon(True)
        t.start()
        print('thread: {}, started'.format(t.getName()))
    return t
def getName(self, engine=None):
    # Test if the call was to get the name of the thread
    if engine is None:
        return Thread.getName(self)
    nametag = engine.find("meta/name")
    if nametag is not None:
        return nametag.text.strip()
    return engine.get('binname')
def _start_main_thread(self):
    """
    The websocket client starts a separate thread upon initialization,
    with further subthreads for each connection.
    """
    thread = Thread(target=self._start_socket_control_queue)
    thread.daemon = True
    self.threads[thread.getName()] = thread
    thread.start()
def threads(uuid, n=10):
    for i in range(0, n):
        agent = Agent(uuid)
        name = "Test%d" % i
        t = Thread(name=name, target=main, args=(uuid,))
        t.setDaemon(True)
        t.start()
        print "thread: %s, started" % t.getName()
    return t
def check_message_t(Device):
    # Pass the callable itself; calling check_message_f(Device) here would
    # run it synchronously and give Thread a None target.
    t = Thread(target=check_message_f, args=(Device, ))
    Device.status["cThread"].append(t)
    if thread_processing.check_ready(t.getName(), Device.status["cThread"]):
        pass
    t.start()
    t.join()
    Device.status["cThread"].pop(0)
def threads(n=10):
    t = None
    for i in range(0, n):
        name = 'Test%d' % i
        t = Thread(name=name, target=main)
        t.setDaemon(True)
        t.start()
        print 'thread: %s, started' % t.getName()
    return t
def threadTest():
    thread = Thread(target=foo, args=("HelloWorld",))
    thread.start()
    # set the thread name
    thread.setName("Foo thread")
    print(thread.getName())
    showThread = Thread(target=show)
    # is this a daemon thread?
    print(showThread.isDaemon())
    # Making it a daemon thread means the main thread need not wait for it
    # to finish; when the main thread ends, a daemon thread is destroyed
    # even if it has not run to completion.
    # showThread.setDaemon(True)
    showThread.start()
    print(showThread.getName())
    # join() blocks the current thread and lets the child run first; once
    # the child finishes, the current thread continues.
    showThread.join()
    # showThread.join()
    print("the main thread is about to end")
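A runnable sketch of the daemon-versus-join trade-off described in the comments above; the sleep durations are arbitrary:

import time
from threading import Thread

def slow(label):
    time.sleep(2)
    print(label, "finished")

d = Thread(target=slow, args=("daemon",), daemon=True)  # killed at interpreter exit if still running
n = Thread(target=slow, args=("regular",))              # the interpreter waits for non-daemon threads
d.start()
n.start()

d.join(timeout=0.5)  # a timed join returns after 0.5 s even if d is still running
print("daemon thread alive after timed join:", d.is_alive())  # True
n.join()             # blocks until slow("regular") completes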
def __init__(self, host='127.0.0.1', port=9889):
    if host == 'Any':
        host = ''
    self.handler = ThreadedTCPRequestHandler
    self.server = TCPServer((host, port), self.handler)
    self.handler.line = '$\r\n'
    server_thread = Thread(target=self.server.serve_forever, name='tcpd-thread')
    server_thread.setDaemon(True)
    server_thread.start()
    print "Server loop running in thread:", server_thread.getName()
def rerequest(self, s, callback):
    if not self.lock.isfinished():  # still waiting for prior cycle to complete??
        def retry(self=self, s=s, callback=callback):
            self.rerequest(s, callback)
        self.sched(retry, 5)  # retry in 5 seconds
        return
    self.lock.reset()
    rq = Thread(target=self._rerequest, args=[s, callback])
    rq.setName("TrackerRerequestA" + rq.getName())
    # Arno: make this a daemon thread so the client closes sooner.
    rq.setDaemon(True)
    rq.start()
def start(self, pos=0):
    if self.storage is None:
        raise Exception('Storage is not set')
    if self.final_url is None:
        raise Exception('Final url is not set')
    if self.lock.locked():
        self.seek = pos
        return
    t = Thread(target=self._request, args=[pos])
    t.setName('dd-downloader-' + t.getName())
    t.setDaemon(True)
    t.start()
def main(argv):
    (opts, args) = parser.parse_args()
    if not check_arguments(opts, args):
        print usage
        sys.exit(0)
    thread = Thread(target=start_pathway_tools_api_mode, args=(opts.pathway_tools, ))
    thread.start()
    sleep(10)
    print "going to kill"
    if thread.isAlive():
        try:
            # _Thread__stop is a private CPython detail, not a clean shutdown
            thread._Thread__stop()
        except:
            print(str(thread.getName()) + ' could not be terminated')
def get(self, id):
    task = [task for task in tasks if task['id'] == id]
    if len(task) == 0:
        abort(404)  # check before indexing into the (possibly empty) match list
    hosts = task[0]['hosts']
    command = task[0]['command']
    mod = task[0]['module']
    print mod
    print command
    print hosts
    task_fields = Thread(target=dispatcher.RunTask,
                         args=[module, hosts, command, mod, id])
    task_fields.start()
    task_json = {
        'started': str(task_fields.getName()),
    }
    return {'task': (task_json)}
def download(self):
    # 2fastbt_
    if DEBUG:
        print "http-sdownload: download()"
    if self.is_frozen_by_helper():
        if DEBUG:
            print "http-sdownload: blocked, rescheduling"
        self.resched(1)
        return
    # _2fastbt
    self.cancelled = False
    if self.downloader.picker.am_I_complete():
        self.downloader.downloads.remove(self)
        return
    self.index = self.downloader.picker.next(haveall, self._want, self)
    # 2fastbt_
    if self.index is None and self.frozen_by_helper:
        self.resched(0.01)
        return
    # _2fastbt
    if (self.index is None and not self.endflag
            and not self.downloader.peerdownloader.has_downloaders()):
        self.endflag = True
        self.index = self.downloader.picker.next(haveall, self._want, self)
    if self.index is None:
        self.endflag = True
        self.resched()
    else:
        # DIE
        self.url = self.seedurl
        start = self.pice_size * self.index
        end = start + self.downloader.storage._piecelen(self.index)
        if end == self.total_len:
            end -= 1
            self.last_piece = 1
        self.request_range = '%d-%d' % (start, end)
        # self.url = ( self.seedurl+'&piece='+str(self.index) )
        self._get_requests()
        # if self.request_size < self.downloader.storage._piecelen(self.index):
        #     self.url += '&ranges='+self._request_ranges()
        # DIE
        rq = Thread(target=self._request)
        rq.setName("HTTPDownloader" + rq.getName())
        rq.setDaemon(True)
        rq.start()
        self.active = True
def submit(self):
    for i in range(0, len(self.staging_queues)):
        print('putting in sq # %d' % i)
        staging_queue = self.staging_queues[i]
        try:
            item = staging_queue.get_nowait()
        except Empty:
            continue
        consumer_q = self.consumer_queues[i]
        consumer_timeout = self.consumer_timeout[i]
        consumer_qt = Thread(target=self._put_in_q,
                             args=[consumer_q, item, consumer_timeout],
                             name='cons_%s' % i)
        self.consumer_thread[i] = consumer_qt
        try:
            print('starting thread %s' % consumer_qt.getName())
            consumer_qt.start()
        except:
            # sys.exc_info()[1] is the exception instance itself
            self.journaler.log_event(
                get_time(),
                prepare_journal_message(sys.exc_info()[1], item, consumer_q))
def rerequest_single(self, t, s, callback):
    l = self.lock.set()
    rq = Thread(target=self._rerequest_single, args=[t, s + get_key(t), l, callback])
    rq.setName("TrackerRerequestB" + rq.getName())
    # Arno: make this a daemon thread so the client closes sooner.
    rq.setDaemon(True)
    rq.start()
    self.lock.wait()
    if self.lock.success:
        self.lastsuccessful = t
        self.last_failed = False
        self.never_succeeded = False
        return True
    if not self.last_failed and self.lastsuccessful == t:
        # if the last tracker hit was successful, and you've just tried the
        # tracker you'd contacted before, don't go any further, just fail
        # silently.
        self.last_failed = True
        self.externalsched(callback)
        self.lock.give_up()
        return True
    return False  # returns true if it wants rerequest() to exit
def handler(addr, sslctx, host, port, req, sslsess=None):
    s = SSL.Connection(sslctx)
    s.bind(addr)
    if sslsess:
        s.set_session(sslsess)
        s.connect((host, port))
    else:
        s.connect((host, port))
        sslsess = s.get_session()
    s.write(req)
    while 1:
        data = s.read(4096)
        if not data:
            break
    if addr != ADDR2:
        thr = Thread(target=handler,
                     args=(ADDR2, sslctx, host, port, req, sslsess))
        print "Thread =", thr.getName()
        thr.start()
    s.close()
def handler(sslctx, host, port, href, recurs=0, sslsess=None):
    s = SSL.Connection(sslctx)
    if sslsess:
        s.set_session(sslsess)
        s.connect((host, port))
    else:
        s.connect((host, port))
        sslsess = s.get_session()
        # print sslsess.as_text()
    if recurs:
        p = htmllib.HTMLParser(formatter.NullFormatter())
    f = s.makefile("rw")
    f.write(href)
    f.flush()
    while 1:
        data = f.read()
        if not data:
            break
        if recurs:
            p.feed(data)
    if recurs:
        p.close()
    f.close()
    if recurs:
        for a in p.anchorlist:
            req = 'GET %s HTTP/1.0\r\n\r\n' % a
            thr = Thread(target=handler,
                         args=(sslctx, host, port, req, recurs - 1, sslsess))
            print "Thread =", thr.getName()
            thr.start()
class TimedTaskQueue:

    __single = None

    def __init__(self, nameprefix="TimedTaskQueue", isDaemon=True, inDEBUG=DEBUG):
        self.inDEBUG = inDEBUG
        self.cond = Condition(RLock())
        self.queue = []
        self.count = 0.0  # serves to keep tasks that were scheduled at the same time in FIFO order
        self.thread = Thread(target=self.run)
        self.thread.setDaemon(isDaemon)
        self.thread.setName(nameprefix + self.thread.getName())
        self.thread.start()

        if __debug__:
            self.callstack = {}  # callstack by self.count

    def shutdown(self):
        self.add_task("stop")
        self.add_task = lambda task, t=0, id=None: None

    def add_task(self, task, t=0, id=None):
        """ t parameter is now usable, unlike before.

            If id is given, all the existing tasks with the same id will be
            removed before inserting this task """
        if task is None:
            print_stack()

        self.cond.acquire()
        when = time() + t
        if DEBUG:
            debug_call_name = task.__name__ if hasattr(task, "__name__") else str(task)
            print >> sys.stderr, "ttqueue: ADD EVENT", t, task, debug_call_name

        if __debug__:
            self.callstack[self.count] = format_stack()

        if id != None:  # remove all redundant tasks
            self.queue = filter(lambda item: item[3] != id, self.queue)
        self.queue.append((when, self.count, task, id))
        self.count += 1.0
        self.cond.notify()
        self.cond.release()

    def remove_task(self, id):
        self.cond.acquire()
        self.queue = filter(lambda item: item[3] != id, self.queue)
        self.cond.notify()
        self.cond.release()

    def does_task_exist(self, id):
        return any(item[3] == id for item in self.queue)

    def run(self):
        """ Run by server thread """
        if prctlimported:
            prctl.set_name("Tribler" + currentThread().getName())

        while True:
            task = None
            timeout = None
            flag = False
            self.cond.acquire()
            while True:
                while len(self.queue) == 0 or flag:
                    flag = False
                    if timeout is None:
                        # Wait until something is queued
                        self.cond.wait()
                    else:
                        # Wait till first event is due
                        self.cond.wait(timeout)
                # A new event was added or an event is due
                self.queue.sort()

                (when, count, task, id) = self.queue[0]
                if DEBUG:
                    print >> sys.stderr, "ttqueue: EVENT IN QUEUE", when, task
                now = time()
                if now < when:
                    # Event not due, wait some more
                    if DEBUG:
                        print >> sys.stderr, "ttqueue: EVENT NOT TILL", when - now
                    timeout = when - now
                    flag = True
                else:
                    # Event due, execute
                    if DEBUG:
                        print >> sys.stderr, "ttqueue: EVENT DUE"
                    self.queue.pop(0)
                    if __debug__:
                        assert count in self.callstack
                        stack = self.callstack.pop(count)
                    break
            self.cond.release()

            # Execute task outside lock
            try:
                # 'stop' and 'quit' are only used for unit test
                if task == 'stop':
                    break
                elif task == 'quit':
                    if len(self.queue) == 0:
                        break
                    else:
                        (when, count, task, id) = self.queue[-1]
                        t = when - time() + 0.001
                        self.add_task('quit', t)
                else:
                    if self.inDEBUG:
                        t1 = time()
                    task()
                    if self.inDEBUG:
                        took = time() - t1
                        if took > 0.2:
                            debug_call_name = task.__name__ if hasattr(task, "__name__") else str(task)
                            print >> sys.stderr, "ttqueue: EVENT TOOK", took, debug_call_name
            except:
                print_exc()
                if __debug__:
                    print >> sys.stderr, "<<<<<<<<<<<<<<<<"
                    print >> sys.stderr, "TASK QUEUED FROM"
                    print >> sys.stderr, "".join(stack)
                    print >> sys.stderr, ">>>>>>>>>>>>>>>>"
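A minimal usage sketch for this queue, assuming the class as defined above (the task body is made up):

ttq = TimedTaskQueue(nameprefix="Demo")

def greet():
    print("hello from the task queue")

ttq.add_task(greet)                      # runs as soon as possible
ttq.add_task(greet, t=2)                 # runs roughly 2 seconds from now
ttq.add_task(greet, t=5, id="greeter")   # removes any pending task with the same id first
ttq.shutdown()                           # enqueues 'stop' at t=0 and disables add_task;
                                         # tasks still pending when 'stop' runs are dropped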
def getName(self, engine=None):
    # Test if the call was to get the name of the thread
    if engine is None:
        return Thread.getName(self)
    return engine["name"]
""" import os, sys import time from threading import Thread def Foo(arg,arg2): for i in range(30): print i time.sleep(1) print 'before' t1 = Thread(target=Foo,args=(1,2,)) t1.setDaemon(True) t1.start() t1.join(5) #主线程到达join,直到子线程结束才继续主线程。默认不超时,如果超过5s,超时后主线程不等待 print t1.getName() #t1.setName('testthread') print t1.isDaemon() #False 等待子线程完成才结束主线程。True 主线程结束就退出,不等待子线程.是否守护 print t1.isAlive() print 'after' print 'after' print 'after' print 'after end' time.sleep(10)
class ExportSql():

    def __init__(self, parent, name, tbl, sql, pw=''):
        self.parent = parent  # parent is needed for the callback
        self.logger = logging.getLogger('main.sql_exporter.ExportSql')
        self.name = name
        self.tbl = tbl
        self.sql = sql
        self.pw = pw
        self.thread = Thread(target=self.export_query_results,
                             args=(self, name, tbl, sql, pw))
        self.thread.setName(name)
        self.thread.daemon = True
        self.thread.start()
        parent.queue.put(self.thread)
        parent.queue.get()

    def call_dad(self):
        self.logger.debug('Thread finished - name: ' + self.thread.getName())
        self.parent.queue.task_done()
        self.parent.queue.join()
        self.logger.debug('Items remaining in queue: ' + str(self.parent.queue.qsize()))

    def export_query_results(self, parent, name, tbl, sql, pw):
        """ Writes a sql query to a csv. """
        start_datetime = timestr()
        start_time = just_time()
        logger = logging.getLogger('main.sql_exporter.ExportSql.export_query_results')
        logger.debug('Pulling query ' + name)
        fso = FSO()
        fso.make_dir('output')
        csv_path = 'output\\' + name + '_' + start_datetime + '.csv'
        con = Connection(table=tbl, password=pw)

        def result_iter(cursor, chunksize=1000):
            while True:
                results = cursor.fetchmany(chunksize)
                if not results:
                    break
                for result in results:
                    yield result

        def call_grandfather(status, done=False):
            if done == True:
                finish = just_time()
            else:
                finish = '...'
            parent.parent.callback(name, start_time, finish, status)

        call_grandfather(status='Connecting')
        if con.open():
            cursor = con.connection.cursor()
            call_grandfather(status='Executing query')
            try:
                cursor.execute(sql)
                with open(csv_path, 'w', newline='') as csv_file:
                    call_grandfather(status='Writing csv')
                    writer = csv.writer(csv_file, quoting=csv.QUOTE_ALL)
                    writer.writerow([i[0] for i in cursor.description])  # header
                    for r, row in enumerate(result_iter(cursor, 1000)):
                        if r > 100000:
                            break
                        if r % 1000 == 0:
                            logger.info('Writing row ' + str(r))
                        writer.writerow(list(row) + ['', '', '', ''])
                call_grandfather(status='Great Success!', done=True)
                fso.open_file(csv_path)
            except Exception as e:
                err = str(e)
                logger.error(err)
                call_grandfather(status=str(e), done=True)
            finally:
                con.close()
        parent.call_dad()
def run(self):
    global block, y, es, lists, baddies, config, resendTo, timeout, dedupped, dedup
    self.name = Thread.getName(self)
    ja = []
    jas = []
    self.printid("Thread started")
    mla = None
    ml = ""
    mboxfile = ""
    filename = ""
    archie = archiver.Archiver(parseHTML=parseHTML)

    while len(lists) > 0:
        self.printid("%u elements left to slurp" % len(lists))

        block.acquire()
        try:
            mla = lists.pop(0)
            if not mla:
                self.printid("Nothing more to do here")
                return
        except Exception as err:
            self.printid("Could not pop list: %s" % err)
            return
        finally:
            block.release()

        EY = 1980
        EM = 1
        stime = time.time()
        dFile = False
        if maildir:
            messages = mailbox.Maildir(tmpname)
        elif imap:
            imap4 = mla[2]

            def mailgen(list):
                for uid in list:
                    msgbytes = imap4.uid('fetch', uid, '(RFC822)')[1][0][1]
                    yield email.message_from_bytes(msgbytes)

            messages = mailgen(mla[0])
        elif filebased:
            tmpname = mla[0]
            filename = mla[0]
            if filename.find(".gz") != -1:
                self.printid("Decompressing %s..." % filename)
                try:
                    with open(filename, "rb") as bf:
                        bmd = bf.read()
                        bf.close()
                    bmd = gzip.decompress(bmd)
                    tmpfile = tempfile.NamedTemporaryFile(mode='w+b', buffering=1, delete=False)
                    tmpfile.write(bmd)
                    tmpfile.flush()
                    tmpfile.close()
                    tmpname = tmpfile.name
                    filename = tmpname
                    dFile = True  # Slated for deletion upon having been read
                    self.printid("%s -> %u bytes" % (tmpname, len(bmd)))
                except Exception as err:
                    self.printid("This wasn't a gzip file: %s" % err)
            self.printid("Slurping %s" % filename)
            messages = mailbox.mbox(tmpname)
        else:
            ml = mla[0]
            mboxfile = mla[1]
            self.printid("Slurping %s/%s" % (ml, mboxfile))
            m = re.match(r"(\d\d\d\d)(\d\d)", mboxfile)
            EY = 1997
            EM = 1
            if m:
                EY = int(m.group(1))
                EM = int(m.group(2))
            ctx = urlopen("%s%s/%s" % (source, ml, mboxfile))
            inp = ctx.read().decode(ctx.headers.get_content_charset() or 'utf-8', errors='ignore')

            tmpname = hashlib.sha224(("%f-%f-%s-%s.mbox" % (random.random(), time.time(), ml, mboxfile)).encode('utf-8')).hexdigest()
            with open(tmpname, "w") as f:
                f.write(inp)
                f.close()
            messages = mailbox.mbox(tmpname)

        count = 0
        bad = 0

        LEY = EY

        for message in messages:
            # If --filter is set, discard any messages not matching by continuing to next email
            if fromFilter and 'from' in message and message['from'].find(fromFilter) == -1:
                continue
            if resendTo:
                self.printid("Delivering message %s via MTA" % message['message-id'] if 'message-id' in message else '??')
                s = SMTP('localhost')
                try:
                    if list_override:
                        message.replace_header('List-ID', list_override)
                    message.replace_header('To', resendTo)
                except:
                    if list_override:
                        message['List-ID'] = list_override
                message['cc'] = None
                s.send_message(message, from_addr=None, to_addrs=(resendTo))
                continue
            if (time.time() - stime > timeout):  # break out after N seconds, it shouldn't take this long..!
                self.printid("Whoa, this is taking way too long, ignoring %s for now" % tmpname)
                break

            json, contents = archie.compute_updates(list_override, private, message)
            if json and not (json['list'] and json['list_raw']):
                self.printid("No list id found for %s " % json['message-id'])
                bad += 1
                continue

            # If --dedup is active, try to filter out any messages that already exist
            if json and dedup and message.get('message-id', None):
                res = es.search(
                    index=dbname,
                    doc_type="mbox",
                    size=1,
                    body={
                        'query': {
                            'bool': {
                                'must': [
                                    {
                                        'term': {
                                            'message-id': message.get('message-id', None)
                                        }
                                    }
                                ]
                            }
                        }
                    }
                )
                if res and len(res['hits']['hits']) > 0:
                    self.printid("Dedupping %s" % json['message-id'])
                    dedupped += 1
                    continue

            if json:
                if args.dups:
                    try:
                        duplicates[json['mid']].append(json['message-id'] + " in " + filename)
                    except:
                        duplicates[json['mid']] = [json['message-id'] + " in " + filename]
                try:
                    # temporary hack to try and find an encoding issue
                    # needs to be replaced by proper exception handling
                    json_source = {
                        'mid': json['mid'],  # needed for bulk-insert only, not needed in database
                        'message-id': json['message-id'],
                        'source': archie.mbox_source(message)
                    }
                except Exception as e:
                    self.printid("Error '%s' processing id %s msg %s " % (e, json['mid'], json['message-id']))
                    bad += 1
                    continue

                count += 1
                ja.append(json)
                jas.append(json_source)
                if contents:
                    if not args.dry:
                        for key in contents:
                            es.index(
                                index=dbname,
                                doc_type="attachment",
                                id=key,
                                body={
                                    'source': contents[key]
                                }
                            )
                if len(ja) >= 40:
                    bulk = BulkThread()
                    bulk.assign(self.name, ja, es, 'mbox')
                    bulk.insert()
                    ja = []

                    bulks = BulkThread()
                    bulks.assign(self.name, jas, es, 'mbox_source')
                    bulks.insert()
                    jas = []
            else:
                self.printid("Failed to parse: Return=%s Message-Id=%s" % (message.get('Return-Path'), message.get('Message-Id')))
                bad += 1

        if filebased:
            self.printid("Parsed %u records (failed: %u) from %s" % (count, bad, filename))
            if dFile:
                os.unlink(tmpname)
        elif imap:
            self.printid("Parsed %u records (failed: %u) from imap" % (count, bad))
        else:
            self.printid("Parsed %s/%s: %u records (failed: %u) from %s" % (ml, mboxfile, count, bad, tmpname))
            os.unlink(tmpname)

        y += count
        baddies += bad
        if len(ja) > 0:
            bulk = BulkThread()
            bulk.assign(self.name, ja, es, 'mbox')
            bulk.insert()
            ja = []
        if len(jas) > 0:
            bulks = BulkThread()
            bulks.assign(self.name, jas, es, 'mbox_source')
            bulks.insert()
            jas = []
        self.printid("Done, %u elements left to slurp" % len(lists))