def runThreads(numThreads, threadFunction, cleanupFunction=None, forwardException=True, threadChoice=False, startThreadMsg=True):
    threads = []

    kb.multiThreadMode = True
    kb.threadContinue = True
    kb.threadException = False

    if threadChoice and numThreads == 1 and not (kb.injection.data and not any(_ not in (PAYLOAD.TECHNIQUE.TIME, PAYLOAD.TECHNIQUE.STACKED) for _ in kb.injection.data)):
        while True:
            message = "please enter number of threads? [Enter for %d (current)] " % numThreads
            choice = readInput(message, default=str(numThreads))
            if choice:
                skipThreadCheck = False

                if choice.endswith('!'):
                    choice = choice[:-1]
                    skipThreadCheck = True

                if choice.isdigit():
                    if int(choice) > MAX_NUMBER_OF_THREADS and not skipThreadCheck:
                        errMsg = "maximum number of used threads is %d avoiding potential connection issues" % MAX_NUMBER_OF_THREADS
                        logger.critical(errMsg)
                    else:
                        conf.threads = numThreads = int(choice)
                        break

    if numThreads == 1:
        warnMsg = "running in a single-thread mode. This could take a while"
        logger.warn(warnMsg)

    try:
        if numThreads > 1:
            if startThreadMsg:
                infoMsg = "starting %d threads" % numThreads
                logger.info(infoMsg)
        else:
            threadFunction()
            return

        # Start the threads
        for numThread in xrange(numThreads):
            thread = threading.Thread(target=exceptionHandledFunction, name=str(numThread), args=[threadFunction])

            setDaemon(thread)

            try:
                thread.start()
            except Exception, ex:
                errMsg = "error occurred while starting new thread ('%s')" % ex.message
                logger.critical(errMsg)
                break

            threads.append(thread)

        # And wait for them to all finish
        alive = True
        while alive:
            alive = False
            for thread in threads:
                if thread.isAlive():
                    alive = True
                    time.sleep(0.1)
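# A minimal, self-contained sketch of the pattern runThreads() uses: start N
# daemon threads running one function, then poll isAlive() until every worker
# has finished. The worker() function and NUM_THREADS constant are hypothetical
# placeholders, not part of sqlmap.
import threading
import time

NUM_THREADS = 4

def worker():
    # placeholder workload
    time.sleep(0.5)

threads = []
for n in xrange(NUM_THREADS):
    t = threading.Thread(target=worker, name=str(n))
    t.setDaemon(True)
    t.start()
    threads.append(t)

# Busy-wait until no worker reports isAlive() any more
alive = True
while alive:
    alive = any(t.isAlive() for t in threads)
    time.sleep(0.1)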
def mysqlDictAttack(host, dic, user="******", db='', threadnumber=20):
    print "Starting attack with %d threads" % threadnumber
    entries = loadDictionary(dic)
    ctx = {}
    ctx['counter'] = 0
    ctx['found'] = False
    threads = []
    lock = threading.Lock()
    t = None
    try:
        for i in range(threadnumber):
            print "Creating cracker thread"
            t = Worker(host, user, ctx, lock, entries, i)
            t.daemon = True
            t.start()
            threads.append(t)
        # Workaround for the join()/SIGINT problem: poll instead of blocking
        while True:
            if not any([thread.isAlive() for thread in threads]):
                break
            else:
                time.sleep(1)
    except KeyboardInterrupt:
        print "Received keyboard interrupt"
        for t in threads:
            t.kill_received = True
def run(self):
    if self.getConn():
        self.down_parse()
        for thread in self.threads:
            if thread.isAlive():
                thread.join()
        if self.imap_server.state == 'AUTH':
            self.imap_server.logout()
        else:
            print self.imap_server.state
            self.imap_server.close()
            self.imap_server.logout()
        logging.info("task: %s parse success" % self.username)
        file_num = 0
        file_dir = self.files_path + "\\parsed"
        for root, dirs, files in os.walk(file_dir):
            for file in files:
                ext = file.split(".")[-1]
                #print file, ext
                if ext != "eml":
                    file_num = file_num + 1
        #print file_num
        data = {"username": self.username, "status": "parse", "result": "finished", "file_num": str(file_num)}
        info_str = json.dumps(data)
        send_msg(info_str)
    else:
        data = {"username": self.username, "status": "login", "result": "error"}
        info_str = json.dumps(data)
        send_msg(info_str)
def join_terminated_client_threads():
    for thread in client_threads:
        if not thread.isAlive():
            log.debug("Client thread %s is not alive, joining it" % thread.getName())
            thread.join()
    log.debug("All terminated client threads have been joined")
def run(self):
    if self.getConn():
        self.down_parse()
        self.pop_server.quit()
        for thread in self.threads:
            if thread.isAlive():
                thread.join()
        file_num = 0
        file_dir = self.rootDir + "\\parsed"
        for root, dirs, files in os.walk(file_dir):
            for file in files:
                ext = file.split(".")[-1]
                #print file, ext
                if ext != "eml":
                    file_num = file_num + 1
        print file_num
        replyToWeb(self.taskId, 'finished', 'parse', self.username, str(file_num))
        # iterate over a copy, since entries are removed while iterating
        for ins in Controller.Controller.threads[self.taskId][:]:
            if ins.kill:
                Controller.Controller.q.acquire()
                Controller.Controller.threads[self.taskId].remove(ins)
                Controller.Controller.q.release()
        if len(Controller.Controller.threads[self.taskId]) == 0:
            Controller.Controller.q.acquire()
            Controller.Controller.taskIdList.remove(self.taskId)
            Controller.Controller.q.release()
    else:
        return False
def connect_proxy():
    global thread
    # Starts the proxy in a separate thread
    if not thread.isAlive():
        print "Starting Thread"
        thread = WebProxyStart()
        thread.start()
def __call__(self, *args, **kwArgs):
    thread = TimeoutHelperThread(self._func, args, kwArgs, name=self._name)
    thread.start()
    thread.join(self._timeout)
    if thread.isAlive():
        raise chakana.error.Timeout(thread, self._timeout)
    if thread.error is None:
        return thread.result
    raise chakana.error.ChildException(thread.error, thread.exc_info)
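# A minimal, self-contained sketch of the same run-with-timeout pattern without
# the chakana helpers: run the target in a thread, join() with a timeout, and
# use isAlive() to detect that the call timed out. The call_with_timeout name
# and RuntimeError are hypothetical; only the join/isAlive logic mirrors the
# snippet above.
import threading

def call_with_timeout(func, timeout, *args, **kwargs):
    result = {}

    def target():
        try:
            result['value'] = func(*args, **kwargs)
        except Exception, e:
            result['error'] = e

    t = threading.Thread(target=target)
    t.setDaemon(True)
    t.start()
    t.join(timeout)
    if t.isAlive():
        # the worker is still running: report a timeout to the caller
        raise RuntimeError("call timed out after %s seconds" % timeout)
    if 'error' in result:
        raise result['error']
    return result.get('value')

# usage: call_with_timeout(time.sleep, 1.0, 5) would raise after about one second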
def stop_all_threads(self, block=False):
    """ Stops all threads. If block is True then actually wait for
    the thread to finish (may block the UI) """
    for thread in self.fooThreads.values():
        thread.cancel()
        if block:
            if thread.isAlive():
                thread.join()
def _enable_text_view_widget(field, thread_list):
    for thread in thread_list:
        if thread.isAlive():
            return True
    status = True
    for thread in thread_list:
        status = status and thread.getStatus()
    field.set_sensitive(status)
    if not status:
        self.__push_status_message('<span font_desc="belgrano 10"><b> Login Failed - Network Error</b></span>')
    return False
def _xray_frame_process(queue, linger=True, wait=None):
    """The _xray_frame_process() function starts the viewer in a
    separate thread.  It then continuously reads data from @p queue
    and dispatches update events to the viewer.  The function returns
    when it reads a @c None object from @p queue or when the viewer
    thread has exited.
    """
    from Queue import Empty
    import rstbx.viewer

    # Start the viewer's main loop in its own thread, and get the
    # interface for sending updates to the frame.
    thread = _XrayFrameThread()
    send_data = thread.send_data

    while True:
        try:
            payload = queue.get(timeout=1)

            if payload is None:
                if linger:
                    thread.join()
                else:
                    thread.stop()
                return

            if not thread.isAlive():
                thread.join()
                return

            if wait is not None:
                time.sleep(wait)

            # All kinds of exceptions--not just PyDeadObjectError--may occur
            # if the viewer process exits during this call.  XXX This may be
            # dangerous!
            try:
                send_data(rstbx.viewer.image(payload[0]), payload[1])
            except Exception:
                pass
        except Empty:
            pass
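# A minimal, self-contained sketch of the same idea: a loop feeding a worker
# thread from a Queue, shutting down on a None sentinel and polling isAlive()
# to notice when the worker is gone. The consumer/worker names are hypothetical
# and there is no viewer involved.
import threading
import time
from Queue import Queue

queue = Queue()

def consumer():
    while True:
        item = queue.get()
        if item is None:           # sentinel: shut down
            return
        print "processing", item

worker = threading.Thread(target=consumer)
worker.start()

for i in range(3):
    queue.put(i)
queue.put(None)                    # ask the worker to exit

while worker.isAlive():            # poll, like the snippet above
    time.sleep(0.1)
worker.join()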
def start(self, thread_num):
    signal.signal(signal.SIGINT, self.sigint_handler)
    #self.scrawler(self.m_startUrl,1)
    self.myqueue.put(self.m_startUrl)
    self.m_urlId = 1
    self.m_id2urlMap[1] = self.m_startUrl
    self.m_url2idMap[self.m_startUrl] = 1
    threads = []
    for i in range(thread_num):
        t = threading.Thread(target=self.scrawler2, args=())
        t.setDaemon(True)
        threads.append(t)
        t.start()
    while True:
        alive = False
        for thread in threads:
            alive = alive or thread.isAlive()
        if not alive:
            break
def reload(self):
    response = self._make_request(requests.get, path="stats?nohtml=1&format=json")
    if not response:
        return None
    data = simplejson.loads(response.content)
    task_data = {}
    new_tasks = {}

    for key, value in data.iteritems():
        try:
            task_name, metric = key.split('-')
        except:
            self.metadata[key] = value
            continue
        if not task_name in task_data:
            task_data[task_name] = {}
        task_data[task_name][metric] = value

    # Update all tasks in parallel
    update_threads = []
    for task_name in task_data.keys():
        task_dict = task_data[task_name]
        new_tasks[task_dict['name']] = task_dict
        if task_dict['running']:
            t = threading.Thread(target=self.run_update_task_data, args=[new_tasks, task_dict['name']])
            t.start()
            update_threads.append(t)

    while update_threads:
        # iterate over a copy so finished threads can be removed safely
        for thread in update_threads[:]:
            if not thread.isAlive():
                update_threads.remove(thread)
        time.sleep(0.0001)

    self.tasks = new_tasks
    return self.tasks
def terminate_thread(self, thread):
    """
    Terminates a python thread from another thread.

    :param thread: a threading.Thread instance
    """
    try:
        if not thread.isAlive():
            return
        exc = ctypes.py_object(SystemExit)
        res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
            ctypes.c_long(thread.ident), exc)
        if res == 0:
            raise ValueError("nonexistent thread id")
        elif res > 1:
            # "if it returns a number greater than one, you're in trouble,
            # and you should call it again with exc=NULL to revert the effect"
            ctypes.pythonapi.PyThreadState_SetAsyncExc(thread.ident, None)
            raise SystemError("PyThreadState_SetAsyncExc failed")
    except Exception:
        pass
    self.cmd_vel_pub.publish(Twist())
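# A minimal, self-contained sketch of the same async-exception trick applied to
# a plain worker thread (no ROS publisher involved). The worker() function is a
# hypothetical placeholder; the ctypes call is the one used above. Note the
# exception is only delivered when the thread executes Python bytecode.
import ctypes
import threading
import time

def worker():
    try:
        while True:
            time.sleep(0.1)
    except SystemExit:
        print "worker received SystemExit, exiting"

t = threading.Thread(target=worker)
t.start()
time.sleep(0.5)

# Inject SystemExit into the worker thread
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
    ctypes.c_long(t.ident), ctypes.py_object(SystemExit))
if res == 0:
    raise ValueError("nonexistent thread id")

t.join()
print "worker alive?", t.isAlive()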
def run(self):
    self.rootDir = os.path.dirname(self.db_path)
    emlCount = 0
    for root, dirs, files in os.walk(self.srcPath):
        total = len(files)
        step = 100
        rate = total / step
        for tt in range(rate + 1):
            min = tt * step
            if (tt + 1) * step > total:
                max = total
            else:
                max = (tt + 1) * step
            tempList = files[min:max]
            for emlName in tempList:
                if self.kill is False:
                    if emlName[-4:] == u'.eml':
                        emlPath = os.path.join(root, emlName)
                        #try:
                        #    print 'eml is', emlPath
                        #except Exception, e:
                        #    traceback.print_exc()
                        parser = EMLParser.Parse(self.db_path, self.taskId, emlPath, emlCount, '', self.rootDir)
                        try:
                            parser.start()
                        except Exception, e:
                            print e
                            print emlPath
                        self.threads.append(parser)
                        print emlCount
                        emlCount += 1
    for thread in self.threads:
        if thread.isAlive():
            thread.join()
class Iterator(threading.Thread):
    def __init__(self, db_path, taskId, srcPath):
        threading.Thread.__init__(self)
        self.kill = False
        self.db_path = db_path
        self.taskId = taskId
        self.srcPath = srcPath
        self.threads = []

    def run(self):
        emlCount = 0
        for root, dirs, files in os.walk(self.srcPath):
            for emlName in files:
                if self.kill is False:
                    if emlName[-4:] == u'.eml':
                        emlPath = os.path.join(root, emlName)
                        try:
                            print 'eml is', emlPath
                        except Exception, e:
                            traceback.print_exc()
                        emlCount += 1
                        parser = EMLParser.Parse(self.db_path, self.taskId, emlPath, emlCount, '')
                        parser.start()
                        self.threads.append(parser)
                        parser.join()
        for thread in self.threads:
            if thread.isAlive():
                thread.join()
        replyToWeb(self.taskId)
        Controller.Controller.q.acquire()
        Controller.Controller.threads[self.taskId] = None
        Controller.Controller.taskIdList.remove(self.taskId)
        Controller.Controller.q.release()
        self.kill = True
        #print hash_name
        emlpath = self.rootDir + "\\" + self.username + "\\" + hash_name + ".eml"
        dir = os.path.dirname(emlpath)
        if os.path.exists(dir) is not True:
            os.makedirs(dir)
        if os.path.exists(emlpath) is not True:
            with open(emlpath, 'wb') as outf:
                outf.write(data)
        replyToWeb(self.taskId, "success", "download", self.username, '')
        parser = EMLParser.Parse(self.db_path, self.taskId, emlpath, msg_id, self.username, self.rootDir)
        parser.start()
        self.threads.append(parser)
        for thread in self.threads:
            if thread.isAlive():
                thread.join(timeout=10)
        self.kill = True


class IMAPTool(threading.Thread):
    def __init__(self, username, password, server, port, useSSL, rootDir, taskId):
        threading.Thread.__init__(self)
        self.username = username
        self.password = password
        self.server = server
        self.port = port
        self.useSSL = useSSL
        self.rootDir = rootDir
        self.taskId = taskId
import thread
import time

# Define a function for the thread
def print_time(threadName, delay):
    count = 0
    while count < 5:
        time.sleep(delay)
        count += 1
        print "%s: %s" % (threadName, time.ctime(time.time()))

# Create two threads as follows
try:
    thread.start_new_thread(print_time, ("Thread-1", 2,))
    thread.start_new_thread(print_time, ("Thread-2", 4,))
except:
    print "Error: unable to start thread"

# The low-level thread module has no isAlive(); just keep the main
# thread running long enough for both workers to finish.
count = 0
while count < 20:
    count += 1
    time.sleep(1)
def start(self, *w):
    import time
    begintime = time.time()

    def show_result():
        currenttime = time.time()
        len_result = len(result)
        if len_result:
            server, value = result[-1]
        if len_result == 0:
            return
        # display progress
        total = len(candidate_repos)
        progress = float(len_result) / total
        self.progress_bar.set_fraction(progress)
        self.progress_bar.set_text('%s / %s' % (len_result, total))
        # display text
        if isinstance(value, float):
            text = _("<span color='black'>Response time of %(server)s is %(value).0f ms.</span>") % {'server': server, 'value': value}
        elif value == 'cannot ping' or value == 'unreachable':
            text = _("<span color='black'>Server %s is unreachable.</span>") % server
        self.progress_label.set_markup(text)
        # display time left
        if len_result >= 40 and len_result % 5 == 0:
            timeleft = float(total - len_result) * (currenttime - begintime) / (len_result)
            text = _("<span color='black'>Time left: %s</span>") % derive_time(int(timeleft))
            self.timeleft_label.set_markup(text)
        self.refresh_GUI()

    import threading
    result = []
    threads = []
    candidate_repos = get_candidate_repositories()
    servers = [e[3] for e in candidate_repos]
    urls = [e[2] for e in candidate_repos]

    # ping servers
    for url, server in zip(urls, servers):
        def alive_threads(threads):
            i = 0
            for t in threads:
                if t.isAlive():
                    i += 1
            return i
        while alive_threads(threads) > 10:
            import time
            time.sleep(0.1)
            show_result()
            if self.can_skip:
                self.destroy()
                while gtk.events_pending():
                    gtk.main_iteration()
                return
        thread = PingThread(url, server, result)
        threads.append(thread)
        thread.start()

    for thread in threads:
        if not thread.isAlive():
            continue
        thread.join()
        show_result()
        if self.can_skip:
            self.destroy()
            while gtk.events_pending():
                gtk.main_iteration()
            return

    # decide fastest server
    min_time = 1e8
    fastest_server = None
    for (server, time) in result:
        if not isinstance(time, float):
            continue
        if time < min_time:
            min_time = time
            fastest_server = server

    try:
        if not fastest_server:
            return
        self.progress_label.set_text(
            _('In order to use the fastest repository, Ailurus will change /etc/apt/sources.list'))
        self.progress_bar.set_text(
            _('The fastest repository is %s.') % fastest_server)
        for e in candidate_repos:
            if e[3] == fastest_server:
                new_url = e[2]
                break
        # check whether repositories should be changed
        for repos in APTSource2.official_urls():
            assert ':' in repos
            if repos != new_url:
                break
        else:
            return
        self.change_server(new_url)
    except:
        print_traceback()
    finally:
        # destroy dialog
        self.can_exit = True
        self.destroy()
def run_async(self, sockopt=None, sslopt=None,
              ping_interval=0, ping_timeout=None,
              http_proxy_host=None, http_proxy_port=None,
              http_no_proxy=None, http_proxy_auth=None,
              skip_utf8_validation=False,
              host=None, origin=None):
    """
    run event loop for WebSocket framework.
    This loop is an infinite loop and stays alive while the websocket is available.
    sockopt: values for socket.setsockopt.
        sockopt must be tuple and each element is an argument of sock.setsockopt.
    sslopt: ssl socket optional dict.
    ping_interval: automatically send "ping" command every specified period (seconds).
        If set to 0, do not send automatically.
    ping_timeout: timeout (seconds) if the pong message is not received.
    http_proxy_host: http proxy host name.
    http_proxy_port: http proxy port. If not set, set to 80.
    http_no_proxy: host names which don't use the proxy.
    skip_utf8_validation: skip utf8 validation.
    host: update host header.
    origin: update origin header.
    """
    if not ping_timeout or ping_timeout <= 0:
        ping_timeout = None
    if ping_timeout and ping_interval and ping_interval <= ping_timeout:
        raise WebSocketException("Ensure ping_interval > ping_timeout")
    if sockopt is None:
        sockopt = []
    if sslopt is None:
        sslopt = {}
    if self.sock:
        raise WebSocketException("socket is already opened")
    thread = None
    close_frame = None

    try:
        logging.debug("Starting")
        self.sock = websocket.WebSocket(
            self.get_mask_key, sockopt=sockopt, sslopt=sslopt,
            fire_cont_frame=self.on_cont_message and True or False,
            skip_utf8_validation=skip_utf8_validation)
        logging.debug("Created socket")
        self.sock.settimeout(websocket.getdefaulttimeout())
        logging.debug("Connecting")
        self.sock.connect(
            self.url, header=self.header, cookie=self.cookie,
            http_proxy_host=http_proxy_host,
            http_proxy_port=http_proxy_port,
            http_no_proxy=http_no_proxy,
            http_proxy_auth=http_proxy_auth,
            subprotocols=self.subprotocols,
            host=host, origin=origin)
        logging.debug("Calling on open")
        self._callback(self.on_open)

        if ping_interval:
            event = threading.Event()
            thread = threading.Thread(
                target=self._send_ping, args=(ping_interval, event))
            thread.setDaemon(True)
            thread.start()

        logging.debug("Entering mainloop")
        while self.sock.connected:
            logging.debug("Background websocket client calling select")
            r, w, e = select.select(
                (self.sock.sock, self.dispatch_queue.read_pipe), (), (), ping_timeout)
            if not self.keep_running:
                break

            if r and self.dispatch_queue.read_pipe in r:
                logging.debug("Background websocket client running queued jobs")
                n = self.dispatch_queue.run_jobs()

            if r and self.sock.sock in r:
                op_code, frame = self.sock.recv_data_frame(True)
                if op_code == websocket.ABNF.OPCODE_CLOSE:
                    close_frame = frame
                    break
                elif op_code == websocket.ABNF.OPCODE_PING:
                    self._callback(self.on_ping, frame.data)
                elif op_code == websocket.ABNF.OPCODE_PONG:
                    self.last_pong_tm = time.time()
                    self._callback(self.on_pong, frame.data)
                elif op_code == websocket.ABNF.OPCODE_CONT and self.on_cont_message:
                    self._callback(self.on_data, frame.data, frame.opcode, frame.fin)
                    self._callback(self.on_cont_message, frame.data, frame.fin)
                else:
                    data = frame.data
                    if six.PY3 and op_code == websocket.ABNF.OPCODE_TEXT:
                        data = data.decode("utf-8")
                    self._callback(self.on_data, data, frame.opcode, True)
                    self._callback(self.on_message, data)

            if ping_timeout and self.last_ping_tm \
                    and time.time() - self.last_ping_tm > ping_timeout \
                    and self.last_ping_tm - self.last_pong_tm > ping_timeout:
                raise WebSocketTimeoutException("ping/pong timed out")
        logging.debug("While loop exited")
    except (Exception, KeyboardInterrupt, SystemExit) as e:
        traceback.print_exc()
        self._callback(self.on_error, e)
        if isinstance(e, SystemExit):
            # propagate SystemExit further
            raise
    finally:
        logging.debug("Leaving run_async, cleaning up")
        if thread and thread.isAlive():
            event.set()
            thread.join()
        self.keep_running = False
        if self.sock is not None:
            self.sock.close()
        close_args = self._get_close_args(
            close_frame.data if close_frame else None)
        self._callback(self.on_close, *close_args)
        self.sock = None