def cleanup(self):
    """ Clean up before quitting """

    self.pre_exit_trigger = True

    self.logger.info("Shutting down %s, please wait a moment.", self.name)
    for t in threading.enumerate():
        if isinstance(t, TimerClass):
            t.cancel()
    self.logger.debug('Timers cancelled')

    for i in self.objects:
        i.cleanup()
    self.logger.debug('Sensors etc cleanups done')

    for ser in (i for i in self.services if isinstance(i, AbstractUserService)):
        ser.cleanup_system()
    self.logger.debug('User services cleaned up')

    if self.worker_thread.is_alive():
        self.worker_thread.stop()
    self.logger.debug('Worker thread really stopped')

    for ser in (i for i in self.services if isinstance(i, AbstractSystemService)):
        ser.cleanup_system()
    self.logger.debug('System services cleaned up')

    threads = list(t.name for t in threading.enumerate()
                   if t.is_alive() and not t.daemon)
    if threads:
        self.logger.info('After cleanup, we still have the following threads '
                         'running: %s', ', '.join(threads))
def download_and_play(url, file_name, download_path, show_dialog=True):

    # Launch the download thread
    logger.info("[download_and_play.py] Active threads " + str(threading.active_count()))
    logger.info("[download_and_play.py] " + repr(threading.enumerate()))
    logger.info("[download_and_play.py] Starting download thread...")
    download_thread = DownloadThread(url, file_name, download_path)
    download_thread.start()
    logger.info("[download_and_play.py] Download thread started")
    logger.info("[download_and_play.py] Active threads " + str(threading.active_count()))
    logger.info("[download_and_play.py] " + repr(threading.enumerate()))

    # Wait for the download
    logger.info("[download_and_play.py] Waiting...")
    while True:
        cancelled = False

        if show_dialog:
            dialog = xbmcgui.DialogProgress()
            dialog.create('Descargando...', 'Cierra esta ventana para empezar la reproducción')
            dialog.update(0)

            while not cancelled and download_thread.is_alive():
                dialog.update(download_thread.get_progress(),
                              "Cancela esta ventana para empezar la reproducción",
                              "Velocidad: " + str(int(download_thread.get_speed() / 1024)) + " KB/s " +
                              str(download_thread.get_actual_size()) + "MB de " +
                              str(download_thread.get_total_size()) + "MB",
                              "Tiempo restante: " + str(downloadtools.sec_to_hms(download_thread.get_remaining_time())))
                xbmc.sleep(1000)

                if dialog.iscanceled():
                    cancelled = True
                    break

            dialog.close()
        else:
            xbmc.executebuiltin((u'XBMC.Notification("Iniciando", "Iniciando descarga en segundo plano...", 300)'))
            xbmc.sleep(3000)

        logger.info("[download_and_play.py] End of waiting")

        # Launch the player
        player = CustomPlayer()
        player.set_download_thread(download_thread)
        player.PlayStream(download_thread.get_file_name())

        # Playback finished
        logger.info("[download_and_play.py] Fin de reproducción")
        if player.is_stopped():
            logger.info("[download_and_play.py] Terminado por el usuario")
            break
        else:
            if not download_thread.is_alive():
                logger.info("[download_and_play.py] La descarga ha terminado")
                break
            else:
                logger.info("[download_and_play.py] Continua la descarga")

    # When the player finishes, stop the download if it is still running
    logger.info("[download_and_play.py] Download thread alive=" + str(download_thread.is_alive()))
    if download_thread.is_alive():
        logger.info("[download_and_play.py] Killing download thread")
        download_thread.force_stop()
def exit(self, value=0, message=None):
    """
    Exit the script with given exit value.
    If message is not None, it is printed on screen.
    """
    if isinstance(value, bool):
        if value:
            value = 0
        else:
            value = 1
    else:
        try:
            value = int(value)
            if value < 0 or value > 255:
                raise ValueError
        except ValueError:
            value = 1

    if message is not None:
        self.message(message)

    for t in filter(lambda t: t.name != 'MainThread', threading.enumerate()):
        t.stop()
    while True:
        active = filter(lambda t: t.name != 'MainThread', threading.enumerate())
        if not len(active):
            break
        time.sleep(1)
    sys.exit(value)
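This exit() calls t.stop() on every non-main thread, but the standard threading.Thread class has no stop() method; snippets like this rely on a cooperative-stop convention in the worker threads. A minimal sketch of such a worker (the class and attribute names here are illustrative, not taken from the original code):

import threading

class StoppableThread(threading.Thread):
    """Worker exposing the stop() method the exit() above relies on."""

    def __init__(self, *args, **kwargs):
        super(StoppableThread, self).__init__(*args, **kwargs)
        self._stop_event = threading.Event()

    def stop(self):
        # Cooperative: just ask the run() loop to wind down.
        self._stop_event.set()

    def run(self):
        while not self._stop_event.is_set():
            # do one unit of work here, then re-check the flag
            self._stop_event.wait(0.1)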
def test_urlfetch_slow_server(self):
    """The function also times out if the server replies very slowly.

    (Do the server part in a separate thread.)
    """
    sock, http_server_url = self.make_test_socket()
    sock.listen(1)
    stop_event = threading.Event()

    def slow_reply():
        (client_sock, client_addr) = sock.accept()
        content = 'You are veeeeryyy patient!'
        client_sock.sendall(dedent("""\
            HTTP/1.0 200 Ok
            Content-Type: text/plain
            Content-Length: %d\n\n""" % len(content)))

        # Send the body of the reply very slowly, so that
        # it times out in read() and not urlopen.
        for c in content:
            client_sock.send(c)
            if stop_event.wait(0.05):
                break
        client_sock.close()

    slow_thread = threading.Thread(target=slow_reply)
    slow_thread.start()
    saved_threads = set(threading.enumerate())
    self.assertRaises(TimeoutError, urlfetch, http_server_url)
    # Note that the cleanup also takes care of leaving no worker thread behind.
    remaining_threads = set(threading.enumerate()).difference(saved_threads)
    self.assertEqual(set(), remaining_threads)
    stop_event.set()
    slow_thread.join()
def main():
    # initialize the database connection
    database = DB_helper.DB_helper("root", "900129lbb", "127.0.0.1", "Webscraping")
    csv = CSV_helper.CSV_helper()
    xml = XML_helper.XML_helper("Config.xml")

    threads = []
    brands = xml.read()
    for brand, v_b in brands.items():
        # every csv file is a brand
        thread = thread_handle(brand, v_b)
        thread.start()
        threads.append(thread)
        print len(threading.enumerate())
        while True:
            if len(threading.enumerate()) < 10:
                break
            else:
                print "sleep"
                time.sleep(10)

    for thread in threads:
        thread.join()

    # combine every csv into one xls file;
    # every csv file name is the sheet name
    print "start merging"
    csv.csvs_to_excel()
def main():
    """Main function"""
    lock = threading.Lock()
    thread1 = Foo(lock, iterations=10)
    thread2 = Foo(lock, iterations=15)
    thread1.start()
    thread2.start()

    # Let the main thread do something too...
    for i in range(5):
        lock.acquire()
        print threading.currentThread().name,
        print threading.currentThread().ident,
        print threading.activeCount(),
        print threading.enumerate()
        lock.release()
        time.sleep(0.2)

    # Main thread waits for all threads to complete
    thread1.join()
    thread2.join()
    print "Exiting Main Thread"
def EnableHelloHueCallback():
    Log("Trying to enable thread")
    #threading.Thread(target=run_websocket_watcher,name='thread_websocket').start()
    if not "thread_websocket" in str(threading.enumerate()):
        ValidatePrefs()
    Log(threading.enumerate())
    return MainMenu(header=NAME, message='HelloHue is now enabled.')
def stop(self, signum=None, frame=None):
    self.alive = False
    self.logger.info("Number of Threads: {0}".format(threading.activeCount()))
    for item in self.__items:
        self.__item_dict[item]._fading = False
    try:
        self.scheduler.stop()
    except:
        pass
    try:
        self._plugins.stop()
    except:
        pass
    try:
        self.connections.close()
    except:
        pass
    for thread in threading.enumerate():
        try:
            thread.join(1)
        except:
            pass
    if threading.active_count() > 1:
        for thread in threading.enumerate():
            self.logger.info("Thread: {}, still alive".format(thread.name))
    else:
        self.logger.info("SmartHome.py stopped")
    if MODE == 'default':
        os.remove(self._pidfile)
    logging.shutdown()
    exit()
def shutdown(self):
    BuiltinCore.shutdown(self)
    self.logger.info("Closing RPC command queues")
    self.rpc_q.close()

    def term_children():
        """ Terminate all remaining multiprocessing children. """
        for child in multiprocessing.active_children():
            self.logger.error("Waited %s seconds to shut down %s, "
                              "terminating" % (self.shutdown_timeout,
                                               child.name))
            child.terminate()

    timer = threading.Timer(self.shutdown_timeout, term_children)
    timer.start()
    while len(multiprocessing.active_children()):
        self.logger.info("Waiting for %s child(ren): %s" %
                         (len(multiprocessing.active_children()),
                          [c.name for c in multiprocessing.active_children()]))
        time.sleep(1)
    timer.cancel()
    self.logger.info("All children shut down")
    while len(threading.enumerate()) > 1:
        threads = [t for t in threading.enumerate()
                   if t != threading.current_thread()]
        self.logger.info("Waiting for %s thread(s): %s" %
                         (len(threads), [t.name for t in threads]))
        time.sleep(1)
    self.logger.info("Shutdown complete")
def test_block(self):
    b = wspbus.Bus()
    self.log(b)

    def f():
        time.sleep(0.2)
        b.exit()

    def g():
        time.sleep(0.4)

    threading.Thread(target=f).start()
    threading.Thread(target=g).start()
    threads = [t for t in threading.enumerate() if not get_daemon(t)]
    self.assertEqual(len(threads), 3)

    b.block()

    # The block method MUST wait for the EXITING state.
    self.assertEqual(b.state, b.states.EXITING)
    # The block method MUST wait for ALL non-main, non-daemon threads to finish.
    threads = [t for t in threading.enumerate() if not get_daemon(t)]
    self.assertEqual(len(threads), 1)
    # The last message will mention an indeterminable thread name; ignore it
    self.assertEqual(self._log_entries[:-1],
                     ['Bus STOPPING', 'Bus STOPPED',
                      'Bus EXITING', 'Bus EXITED',
                      'Waiting for child threads to terminate...'])
def _threadChecker(self):
    while True:
        try:
            # for those:
            #   1. whose requesting thread is closed
            #   2. caused by a bug ...
            actives = set(thread.ident for thread in threading.enumerate())
            keepings = set(ident for ident in self._connections.keys())
            useless = keepings - actives

            if useless:
                logging.warning('sqlpool : useless connection found (%d)' % len(useless))

            # release useless connections
            for ident in useless:
                for thread in threading.enumerate():
                    if thread.ident == ident and thread.isAlive():
                        break
                else:
                    self.__releaseConnection(ident)
        except Exception, e:
            logging.error('sqlpool error (_threadChecker) : %s' % str(e))
        finally:
            pass  # original snippet truncated here (likely a sleep between checks)
def test_multiple_clients_server_disconnect(self):
    """
    Test to check multiple client connect/disconnect scenario.
    """
    numclients = 40
    self.ships = []
    for x in xrange(numclients):
        self.ships.append(AIShip_Network_Harness("Add Me " + str(x), self.__bad_thrust_ship))
        self.assertTrue(self.ships[-1].connect(self.cfg.getint("Server", "port")),
                        "Didn't connect to server.")
    #next

    time.sleep(5)

    self.game.end()
    # server closes all connections
    self.server.disconnectAll()

    time.sleep(10)

    print threading.enumerate()

    self.assertFalse(self.server.isrunning(), "Server still running after disconnect.")
    for x in xrange(numclients):
        self.assertFalse(self.ships[x].isconnected(),
                         "Client still connected to server after disconnect.")

    print threading.enumerate()
    self.assertEqual(len(threading.enumerate()), 1, "More than main thread running.")
def __init__(self):
    self.points = 60
    self.living = True
    self.server = Server()
    self.spi = Spi(self.points)
    self.queueSize = 20
    self.proccessQueue = queue.Queue(self.queueSize)
    self.oscWindow_1 = []
    self.trigger = Trigger(1, -5)

    # Thread to handle reading from SPI then writing to Server
    spiThread = threading.Thread(target=self.spiRead)
    spiThread.name = "SPI_Thread"
    spiThread.daemon = True  # killed off on its own (original "deamon" typo fixed)
    spiThread.start()

    # Thread to handle reading from Server then writing to SPI
    serverThread = threading.Thread(target=self.serverRead)
    serverThread.name = "SERVER_Thread"
    serverThread.daemon = True
    serverThread.start()

    print(threading.active_count())
    for thrd in threading.enumerate():
        # isDaemon is a method; the original bare attribute test was always truthy
        if thrd.daemon:
            print(thrd)

    while self.living:
        x = 0
        print(threading.active_count())
        for thrd in threading.enumerate():
            if thrd.daemon:
                print(thrd)
def work(self):
    """
    Performs the work.
    """
    self._stamp('begin work()')
    logging.info('please be patient, this may take some time...')
    # We attempt to analyze each url in the queue.
    # The queue initially contains only the start url.
    # The queue is populated by analyze instances (threads).
    count = 0
    threads_list = []
    finished = False
    while not finished:
        if count > self.maxu:
            while len(threading.enumerate()) > 1:
                time.sleep(self.dt)
            finished = True
        elif len(self.urls) > 0:
            while len(threading.enumerate()) > self.maxt:
                time.sleep(self.dt)
            count += 1
            url = self.urls.pop(0)
            logging.debug('urls=%i(%i,%i) edges=%i vertices=%i threads=%i',
                          len(self.analyzed_urls), len(self.explored),
                          len(self.urls), len(self.edges),
                          len(self.vertices), len(threading.enumerate()))
            self.analyzed_urls.append(url)
            current = _Analyze(url, self)
            threads_list.append(current)
            current.start()
        else:
            finished = (len(threading.enumerate()) == 1)
    self._stamp('end work()')
def ValidatePrefs():
    global auth, plex, hue, converter, active_clients, firstrun
    Log('Validating Prefs')
    auth = HueCheck().check_username()
    if auth is False:
        Log("Please update your Hue preferences and try again")
    if auth is True:
        Log("Hue username is registered... Starting!")
        converter = Converter()
        hue = Hue()
        CompileRooms()
        hue.get_hue_light_groups()
        InitiateCurrentStatus()
        plex = Plex()
        active_clients = []
        Log("Classes initiated")
        if "thread_websocket" in str(threading.enumerate()):
            Log("Closing daemon...")
            ws.close()
        if not "thread_websocket" in str(threading.enumerate()):
            Log("Starting websocket daemon...")
            threading.Thread(target=run_websocket_watcher, name='thread_websocket').start()
        if "thread_clients" in str(threading.enumerate()):
            Log("Setting firstrun to True")
            firstrun = True
        if not "thread_clients" in str(threading.enumerate()):
            Log("Starting clients daemon...")
            threading.Thread(target=watch_clients, name='thread_clients').start()
        Log(threading.enumerate())
    return MainMenu(header=NAME)
def stop_all(self):
    self.logger.info(threading.enumerate())
    for t in self.threads:
        if t.is_alive():
            t.stop()
            time.sleep(0.05)
    self.logger.info(threading.enumerate())
def start_recording(self):
    global video
    video = threading.Thread(target=self.screen_capture)
    video.daemon = True
    print(threading.enumerate())
    video.start()
    print(threading.enumerate())
def multi_thread(queue):
    """
    queue: a list of trees waiting to be processed
    return: the list of processed trees
    """
    # number of active threads
    threads = len(threading.enumerate())
    # list holding the processed trees
    tree_result = []
    while len(queue):
        # This multithreading is effectively unnecessary: the computation is
        # linear and the threads never wait on anything, so one thread
        # basically finishes its whole run before the next starts.
        if threads < 2:
            tree = queue.pop(0)
            # the original passed target=calculate_tree(tree), which called the
            # function immediately in the main thread; pass it via args instead
            t = threading.Thread(target=calculate_tree, args=(tree,),
                                 name='thread-' + str(threads))
            t.start()
            print('thread ' + t.getName() + ' started')
            threads = len(threading.enumerate())
            tree_result.append(tree)
        else:
            time.sleep(2)
    for i in tree_result:
        # write to file
        write_to_file(i, 1)
    return tree_result
def abortall():
    for p in Cache.publishers:
        try:
            p.abort()
        except threading.ThreadError as e:
            log(msg=_('Error aborting: %s - Error: %s') % (str(p), str(e)))
    Cache.dispatcher.abort()
    for p in Cache.publishers:
        p.join(0.5)
    Cache.dispatcher.join(0.5)
    if len(threading.enumerate()) > 1:
        main_thread = threading.current_thread()
        log(msg=_('Enumerating threads to kill others than main (%i)') % main_thread.ident)
        for t in threading.enumerate():
            if t is not main_thread and t.is_alive():
                log(msg=_('Attempting to kill thread: %i: %s') % (t.ident, t.name))
                try:
                    t.abort(0.5)
                except (threading.ThreadError, AttributeError):
                    log(msg=_('Error killing thread'))
                else:
                    if not t.is_alive():
                        log(msg=_('Thread killed successfully'))
                    else:
                        log(msg=_('Error killing thread'))
def __exit__(self, exc_type, exc_val, exc_tb):
    for thread in set(threading.enumerate()) - self.before:
        thread.join(self.timeout)
        if self.check_alive and thread.is_alive():
            raise RuntimeError('Timeout joining thread %r' % thread)
    self.left_behind = sorted(
        set(threading.enumerate()) - self.before,
        key=lambda t: t.name)
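A sketch of how a guard like this might be wrapped and used, assuming the __exit__ above belongs to a context manager whose __enter__ snapshots threading.enumerate(); the ThreadJoiner name and the simplified __exit__ body (no left_behind bookkeeping) are illustrative:

import threading
import time

class ThreadJoiner(object):
    """Hypothetical context manager built around the __exit__ shown above."""

    def __init__(self, timeout, check_alive=True):
        self.timeout = timeout
        self.check_alive = check_alive

    def __enter__(self):
        self.before = set(threading.enumerate())
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        for thread in set(threading.enumerate()) - self.before:
            thread.join(self.timeout)
            if self.check_alive and thread.is_alive():
                raise RuntimeError('Timeout joining thread %r' % thread)

with ThreadJoiner(timeout=2):
    worker = threading.Thread(target=time.sleep, args=(0.1,))
    worker.start()
# every thread started inside the block has been joined at this point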
def testThreadLeak(self):
    mailer = sm.SPM_MailMonitor(StoragePoolStub(), 100)
    threadCount = len(threading.enumerate())
    mailer.stop()
    mailer.run()

    t = lambda: self.assertEquals(threadCount, len(threading.enumerate()))
    retry(AssertionError, t, timeout=4, sleep=0.1)
def main():
    ok.clear()
    saveImgEvent.set()
    degree = "*"
    unit = "mm"
    Robot = ABBProgram()
    RobotQueue.put(Robot)
    if StartEvent.wait(0):
        TakePicture = threading.Thread(target=takeImage, name="Take Picture")
        TakePicture.daemon = True
        TakePicture.start()
        [Robot.runMain(unit, i) for i in xrange(2, 3)]
        # [Robot.runMain(degree, i) for i in xrange(3, 5)]
        Robot.Home()
    Robot.printDateTime()
    print >> Robot.f, "Program Finished"
    with print_lock:
        raw_input('Close program?')
    Robot.close()
    time.sleep(2)
    # end.wait()
    print threading.enumerate()
def exit(self, value=0, message=None):
    """
    Exit the script with given exit value.
    If message is not None, it is output to stdout.
    """
    if isinstance(value, bool):
        if value:
            value = 0
        else:
            value = 1
    else:
        try:
            value = int(value)
            if value < 0 or value > 255:
                raise ValueError
        except ValueError:
            value = 1

    if message is not None:
        self.message(message)

    for t in [t for t in threading.enumerate() if t.name != 'MainThread']:
        if hasattr(t, 'stop') and callable(t.stop):
            t.stop()
    while True:
        active = [t for t in threading.enumerate() if t.name != 'MainThread']
        if not len(active):
            break
        time.sleep(0.1)
    sys.exit(value)
def poll(self):
    '''
    Check for events from SUMA.
    '''
    if self.proc_thread:
        if self.proc_thread not in threading.enumerate():
            tkMessageBox.showwarning("Connection to SUMA lost",
                                     "The connection to SUMA has been lost. " +
                                     "Reload your dataset to continue using COVI.")
            self.proc_not_ready()
            self.proc_thread = False

    if self.proc_thread and self.proc_thread.ready():
        try:
            res = self.proc_thread.res_q.get_nowait()
            self.proc_thread.res_q.task_done()
            if res:
                if res[0] == 'node':
                    self.node_number_label['text'] = '%i' % res[1]
                elif res[0] == 'cluster':
                    self.cluster_number_label['text'] = '%i' % res[1]
                elif res[0] == 'area':
                    self.curr_area_label['text'] = res[1]
                elif res[0] == 'ready':
                    # Re-enable main window widgets
                    set_state(self.real_root, 'enabled')
        except Empty:
            pass

    if self.net_thread and self.net_thread not in threading.enumerate():
        tkMessageBox.showwarning("Connection to server lost",
                                 "The connection to the server has been lost. " +
                                 "Reload your dataset to continue using COVI.")
        self.proc_not_ready()
        self.net_thread = False
    self.root.after(100, self.poll)
def shutdown():
    from logs import lg
    from main import config
    from system import bpio
    lg.out(2, 'bpmain.shutdown')

    import shutdowner
    shutdowner.A('reactor-stopped')

    from automats import automat
    automat.objects().clear()
    if len(automat.index()) > 0:
        lg.warn('%d automats were not cleaned' % len(automat.index()))
        for a in automat.index().keys():
            lg.out(2, '    %r' % a)
    else:
        lg.out(2, 'bpmain.shutdown automat.objects().clear() SUCCESS, no state machines left in memory')

    config.conf().removeCallback('logs/debug-level')

    lg.out(2, 'bpmain.shutdown currently %d threads running:' % len(threading.enumerate()))
    for t in threading.enumerate():
        lg.out(2, '    ' + str(t))

    lg.out(2, 'bpmain.shutdown finishing and closing log file, EXIT')
    automat.CloseLogFile()
    lg.close_log_file()
    if bpio.Windows() and bpio.isFrozen():
        lg.stdout_stop_redirecting()
    return 0
def GenUrlListThreaded(self, max_threads=1):
    """Faster way to convert the file_list to url entries in url_list.

    Assumes that the objects will be downloaded from their native host,
    and therefore no prefix is needed. On pages that have lots of objects,
    this method checks them max_threads at a time until they have all been
    checked.

    Args:
      max_threads: how many objects to check at once.

    Returns:
      url_list: a reference to the urls pointing to the list of files.

    Raises:
      No exceptions handled here.
      No new exceptions generated here.
    """
    main_thread = threading.currentThread()
    logging.info('fetching %d urls %d at a time', len(self.files), max_threads)
    files = copy.copy(self.files)
    while files:
        ended = (max_threads + 1) - len(threading.enumerate())
        if ended:
            logging.debug('Starting %d HTTP threads', ended)
            for i in range(ended):
                t = threading.Thread(target=self.__GenUrlListThreadedWorker,
                                     args=(files,))
                t.start()
                logging.debug('Starting %d of %d HTTP threads', i, ended)
        time.sleep(0.1)

    for t in threading.enumerate():
        if t is not main_thread:
            t.join()
    logging.info('built %d urls', len(self.url_list))
    return self.url_list
def generate_poisson_spiketrains_task(rate, number_of_spiketrains, time):
    # Init result and output-hdf5_file
    # res = []

    # Init the semaphore limiting the pool size
    s = threading.Semaphore(pool_size)
    local_data = threading.local()
    write_lock = threading.Lock()
    # pool = PoolActive()

    for i in range(number_of_spiketrains):
        t = threading.Thread(target=worker, name=str(i),
                             args=(s, local_data, write_lock, rate, time))
        t.start()

    # ----------- Joining: wait for all threads to be done ---------
    logging.debug('Waiting for worker threads')
    main_thread = threading.currentThread()
    print "Threading enumerate ", threading.enumerate()
    for t in threading.enumerate():
        if t is not main_thread:
            print "t = ", t.getName()
            t.join()
    logging.debug('TASK DONE!')
    f.close()
def finish(self):
    self.hps.kill_watch_status()
    self.vps.kill_watch_status()
    self.logging_switch.clear()
    time.sleep(5)
    print "Tried to kill all threads. There are still", threading.active_count(), "alive:"
    # the original evaluated threading.enumerate() without using the result;
    # print it so the surviving threads are actually shown
    print threading.enumerate()
def foo(iterations):
    for i in range(iterations):
        print threading.currentThread().name,
        print threading.currentThread().ident,
        print threading.activeCount(),
        print threading.enumerate()
        time.sleep(0.2)
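threading.currentThread() and threading.activeCount() are legacy camelCase aliases; the same loop written against the modern snake_case API (Python 3):

import threading
import time

def foo(iterations):
    for _ in range(iterations):
        # current_thread()/active_count() are the modern spellings
        print(threading.current_thread().name,
              threading.current_thread().ident,
              threading.active_count(),
              threading.enumerate())
        time.sleep(0.2)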
def handle_dataset_form(dataset_form_):
    keywords = dataset_form_['keywords'].value
    #prefix = dataset_form_['prefix'].value
    if 'submit!' == dataset_form_[create_button_txt].value:
        print 'creating!'
        keywords = keywords.split()
        prefix = dataset_form_['prefix'].value
        print 'before starting thread:', threading.enumerate()
        t = Thread(target=getThreadForCreateOutput(keywords, prefix, create_outputs),
                   name=prefix)
        t.start()
        print 'thread started', threading.enumerate()
    elif 'submit!' == dataset_form_[shrink_button_txt].value:
        print 'shrinking!!'
        dataset = dataset_form_['degree_dataset'].value
        new_prefix = dataset_form_['new_prefix'].value
        #new_prefix = dataset + '_' + dataset_form_['threshold'].value
        threshold = int(dataset_form_['threshold'].value)
        create_outputs1(dataset, 'data', new_prefix, threshold)
    elif 'submit!' == dataset_form_[merge_button_txt].value:
        print 'merging!!'
        lst = check_checkboxes(dataset_form_)
        merge_prefix = dataset_form_['merge_prefix'].value
        from abstractparser import merge_files
        merge_files('data', lst, merge_prefix)
def DumpThreadStacks(_signal, _frame):
    for thread in threading.enumerate():
        reraiser_thread.LogThreadStack(thread)
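reraiser_thread.LogThreadStack appears to be a helper from the snippet's own project; a stdlib-only approximation of the same dump can be built from sys._current_frames() (a sketch, function name illustrative):

import sys
import threading
import traceback

def dump_thread_stacks_stdlib(_signal=None, _frame=None):
    # sys._current_frames() maps each thread ident to its current stack frame
    frames = sys._current_frames()
    for thread in threading.enumerate():
        frame = frames.get(thread.ident)
        if frame is None:
            continue  # thread finished between enumerate() and here
        print('Stack for %s (ident %s):' % (thread.name, thread.ident))
        print(''.join(traceback.format_stack(frame)))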
import time
import thread
import threading

def childThread():
    print "start child"
    time.sleep(10)
    print "end of child"

# Note: this class shadows the childThread function above;
# the instantiation below uses the class.
class childThread(threading.Thread):
    def run(self):
        print "start child"
        time.sleep(10)
        print "end of child"

#thread.start_new_thread(childThread,())
childThread().start()

print threading.enumerate()
for t in threading.enumerate():
    if t is not threading.currentThread():
        print "found"
        t.join()
        print "joined"
print "end"
def assert_num_threads():
    yield
    assert threading.active_count() == 1, threading.enumerate()
def lt(self):
    # list all threads with names
    self.push("{0} Threads:\n".format(threading.activeCount()))
    for t in threading.enumerate():
        self.push("{0}\n".format(t.name))
def _enum_threads(self):
    """Return a list of all currently alive Thread objects."""
    threads_list = threading.enumerate()
    return threads_list
def _get_main_thread():
    for t in threading.enumerate():
        if t.__class__.__name__ == '_MainThread':
            return t
    raise Exception("main thread not found?!")
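On Python 3.4 and later the class-name scan is unnecessary, since the stdlib exposes the main thread directly:

import threading

def _get_main_thread():
    return threading.main_thread()  # available since Python 3.4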
import logging
import threading
import time
from concurrent import futures

FORMAT = '%(asctime)-15s\t [%(processName)s:%(threadName)s, %(process)d:%(thread)8d] %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)

def worker(n):
    logging.info('begin to work{}'.format(n))
    time.sleep(5)
    logging.info('finished{}'.format(n))

# Create a thread pool executor with a capacity of 3
executor = futures.ThreadPoolExecutor(max_workers=3)

fs = []  # collection for tracking the submitted futures
for i in range(3):
    future = executor.submit(worker, i)
    fs.append(future)

while True:
    time.sleep(2)
    logging.info(threading.enumerate())

    flag = True
    for f in fs:
        # logging.info(f.done())
        flag = flag and f.done()

    if flag:
        executor.shutdown()  # shut down the pool; its worker threads exit
        logging.info(threading.enumerate())
        break
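The loop above polls f.done() every two seconds; concurrent.futures can block until completion instead. A sketch of the same flow with futures.wait (equivalent behaviour, minus the periodic thread listing; reuses the worker defined above):

from concurrent import futures
import logging
import threading

executor = futures.ThreadPoolExecutor(max_workers=3)
fs = [executor.submit(worker, i) for i in range(3)]
futures.wait(fs)      # blocks until every future has completed
executor.shutdown()   # reclaim the pool threads
logging.info(threading.enumerate())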
print("---test2---g_num=%d" % g_num) # 创建一个互斥锁 # 默认是未上锁的状态 mutex = threading.Lock() # 创建2个线程,让他们各自对g_num加1000000次 p1 = threading.Thread(target=count1, args=(1000000,)) p1.start() p2 = threading.Thread(target=count2, args=(1000000,)) p2.start() # 等待计算完成 while len(threading.enumerate()) != 1: time.sleep(1) print("2个线程对同一个全局变量操作之后的最终结果是:%s" % g_num) # 当一个线程调用锁的acquire()方法获得锁时,锁就进入“locked”状态。 # 每次只有一个线程可以获得锁。如果此时另一个线程试图获得这个锁,该线程就会变为“blocked”状态,称为“阻塞”, # 直到拥有锁的线程调用锁的release()方法释放锁之后,锁进入“unlocked”状态。 # 线程调度程序从处于同步阻塞状态的线程中选择一个来获得锁,并使得该线程进入运行(running)状态。 # 总结 # 锁的好处: # 确保了某段关键代码只能由一个线程从头到尾完整地执行 # 锁的坏处: # 阻止了多线程并发执行,包含锁的某段代码实际上只能以单线程模式执行,效率就大大地下降了
def dance():
    for i in range(3):
        print("dancing... %d" % i)
        sleep(1)

if __name__ == '__main__':
    print('---start---: %s' % ctime())

    t1 = threading.Thread(target=sing)
    t2 = threading.Thread(target=dance)

    t1.start()
    t2.start()

    while True:
        length = len(threading.enumerate())
        print('number of running threads: %d' % length)
        if length <= 1:
            break
        sleep(0.5)
    print('---end---: %s' % ctime())

# threading.enumerate()
# Sample output (truncated):
#   ---start---: Wed Sep 11 00:57:37 2019
#   singing... 0
#   dancing... 0
#   number of running threads: 3
#   number of running threads: 3
#   singing... 1
def stopSSHQueue():
    """Shut down the SSH Threads"""
    for t in threading.enumerate():
        if t.getName().startswith('SSHThread'):
            t.quit()
    return True
if __name__ == "__main__": try: better_exchook.install() if len(sys.argv) <= 1: for k, v in sorted(globals().items()): if k.startswith("test_"): print("-" * 40) print("Executing: %s" % k) try: v() except unittest.SkipTest as exc: print("SkipTest: %s" % exc) print("-" * 40) print("All passed.") else: assert len(sys.argv) >= 2 for arg in sys.argv[1:]: print("Executing: %s" % arg) if arg in globals(): globals()[arg]() # assume function and execute else: eval(arg) # assume Python code and execute finally: session.close() del session tf.reset_default_graph() import threading if len(list(threading.enumerate())) > 1: print("Warning, more than one thread at exit:") better_exchook.dump_all_thread_tracebacks()
def network_thread_running():
    """Return whether the network thread is running."""
    return any(thread.name == "NetworkThread"
               for thread in threading.enumerate())
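The same name-scan generalizes to any named thread, and is also more robust than substring-matching str(threading.enumerate()) as some snippets above do; a small sketch (function name illustrative):

import threading

def thread_running(name):
    """Return True if any live thread carries exactly this name."""
    return any(t.name == name for t in threading.enumerate())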
def crawl(seeds, username, password, site_name, config, outf=None, dout=None, ngout=None):
    'Crawl CDP/LLDP Neighbors to build a topology'

    # Queue for devices to scrape next
    q = Queue()

    # Queue for neighbor output from threads
    out_q = Queue()

    # Visited list for loop detection
    visited = list()

    # All Neighbor Entries
    neighbors = list()

    # Device entries for connection details (ipv4, os etc)
    devices = dict()

    # Thread tracking
    qtrack = dict()

    # Thread previous join attempts
    joined = list()

    # Distance tracking
    distances = dict()

    # Counters
    crawl_count = 0
    iteration_count = 0

    # Queue up seed devices
    for s in seeds:
        q.put(s)
        devices[s] = dict()
        devices[s]['remote_device_id'] = s
        devices[s]['ipv4'] = s
        devices[s]['os'] = config['main']['seed_os']
        devices[s]['platform'] = 'Unknown'
        devices[s]['logged_in'] = True
        distances[s] = 0

    # Outer queue: starts the inner queue and then adds all unvisited neighbors
    # to the queue when the inner queue is empty. Each iteration of the outer
    # queue visits all next-level neighbors at once inside the inner queue via
    # threads.
    while not q.empty():

        iteration_count += 1
        cqsize = q.qsize()
        if not config['main']['quiet']:
            if int(config['main']['log_level']) >= logging.WARNING and iteration_count > 1:
                pbar = tqdm(total=cqsize, unit='dev')
                pbar.set_description('Iteration %s' % str(iteration_count))

        # Launch threads on everything in queue to scrape
        while not q.empty():
            current = q.get()
            qsize = q.qsize()

            # Progress bar on warning level or above
            if not config['main']['quiet']:
                if int(config['main']['log_level']) >= logging.WARNING and iteration_count > 1:
                    p_int = (cqsize - qsize)
                    pbar.update(1)
                    print('\r', end='')

            if crawl_count > int(config['main']['max_crawl']):
                logger.warning('Max Devices allowed already crawled')

            # Only scrape unvisited devices
            elif current not in visited:
                crawl_count += 1
                visited.append(current)

                while threading.activeCount() > int(config['main']['thread_count']):
                    qsize = q.qsize()
                    logger.debug('Waiting for free thread - Q Size: %s', str(qsize))
                    sleep(1)

                # Throttle connections
                sleep(0.1)
                logger.info('Processing %s', current)

                # Start thread to scrape devices
                nd_thread = threading.Thread(
                    target=gather_nd,
                    kwargs={"device": devices[current], "username": username,
                            "password": password, "out_q": out_q,
                            "qtrack": qtrack})
                nd_thread.start()

        # Join all threads from last iteration and warn if problems joining threads
        logger.info('Joining all active threads')
        main_thread = threading.currentThread()
        wait_timer = 15
        for some_thread in threading.enumerate():
            if some_thread != main_thread:
                tid = str(some_thread.ident)
                if tid in qtrack:
                    tid = qtrack[tid]
                if tid not in joined:
                    joined.append(tid)
                    logger.debug('Joining Thread: %s', tid)
                    some_thread.join(timeout=wait_timer)
                    wait_timer = 1
                else:
                    logger.info('Thread running long time, ignoring: %s: %s',
                                tid, str(some_thread))

        # Process output queue of neighbor data and look for new neighbors to visit
        logger.info('Processing output queue')
        while not out_q.empty():
            nd = out_q.get()

            # Gather distance info
            for n in nd:
                if n['local_device_id'] not in distances:
                    distances[n['local_device_id']] = 100
                if n['remote_device_id'] in distances:
                    if distances[n['local_device_id']] > (distances[n['remote_device_id']] + 1):
                        distances[n['local_device_id']] = distances[n['remote_device_id']] + 1
                        logger.info('Found new distances on %s: %s', n['local_device_id'],
                                    str(distances[n['remote_device_id']] + 1))

            # Save all neighbor data
            for n in nd:
                n['distance'] = distances[n['local_device_id']]
                neighbors.append(n)
                rname = n['remote_device_id']

                # Save device to devices
                if rname not in devices:
                    devices[rname] = n

                # Update unknown devices, restore logged_in variable
                elif devices[rname]['platform'] == 'Unknown':
                    logged_in = False
                    if 'logged_in' in devices[rname]:
                        logged_in = devices[rname]['logged_in']
                    devices[rname] = n
                    devices[rname]['logged_in'] = logged_in

                # Save logged_in as False initially, update on another pass
                if 'logged_in' not in devices[n['local_device_id']]:
                    devices[n['local_device_id']]['logged_in'] = False

                # Local device always was logged in to
                devices[n['local_device_id']]['logged_in'] = True

                logger.info('Processing Out_q entry %s on %s', rname, n['local_device_id'])

                # New neighbor that has not been scraped; only scrape IOS/NXOS for now
                if rname not in visited:
                    if n['os'] == 'cisco_nxos':
                        if rname not in q.queue:
                            q.put(rname)
                    elif n['os'] == 'cisco_ios':
                        if rname not in q.queue:
                            q.put(rname)
                    else:
                        visited.append(rname)
                else:
                    logger.debug('Already visited %s', rname)

    # Count Neighbors
    ncount = 0
    for n in neighbors:
        ncount += 1
    logger.info('Total neighbors: %s', str(ncount))

    output.output_files(outf, ngout, dout, neighbors, devices, distances, site_name, config)
# note: importing enumerate from threading shadows the builtin enumerate()
from threading import Thread, Lock, active_count, current_thread, Timer, enumerate
import time

for thread in enumerate():
    print("Thread name is %s." % thread.getName())

# threading.Timer()
# This function of the threading module is used to create a new Thread and let
# it know that it should only start after a specified time. Once it starts, it
# should call the specified function.

def foo():
    while True:
        delayed()
        time.sleep(2)

def delayed():
    # note: "thread" here is the leftover loop variable from the enumerate()
    # loop above, i.e. the last thread listed, not the current thread
    print("ping the server", thread.getName())
    print("exit the server", thread.getName())

thread1 = Thread(target=foo)
thread1.start()
# start() does not block, so this prints immediately (and foo() never returns)
print("this will print after thread1 finish")
def run(command_path = None):
    import sys
    tests = []
    stdtty = console.save()
    opts = None
    default_exefilter = '*.exe'
    try:
        optargs = { '--rtems-tools': 'The path to the RTEMS tools',
                    '--rtems-bsp':   'The RTEMS BSP to run the test on',
                    '--user-config': 'Path to your local user configuration INI file',
                    '--report-mode': 'Reporting modes, failures (default),all,none',
                    '--list-bsps':   'List the supported BSPs',
                    '--debug-trace': 'Debug trace based on specific flags',
                    '--filter':      'Glob that executables must match to run (default: ' +
                                     default_exefilter + ')',
                    '--stacktrace':  'Dump a stack trace on a user termination (^C)' }
        mailer.append_options(optargs)
        opts = options.load(sys.argv, optargs = optargs, command_path = command_path)
        mail = None
        output = None
        if opts.find_arg('--mail'):
            mail = mailer.mail(opts)
            # Request these now to generate any errors.
            from_addr = mail.from_address()
            smtp_host = mail.smtp_host()
            to_addr = opts.find_arg('--mail-to')
            if to_addr:
                to_addr = to_addr[1]
            else:
                to_addr = '*****@*****.**'
            output = log_capture()
        log.notice('RTEMS Testing - Tester, %s' % (version.str()))
        if opts.find_arg('--list-bsps'):
            bsps.list(opts)
        exe_filter = opts.find_arg('--filter')
        if exe_filter:
            exe_filter = exe_filter[1]
        else:
            exe_filter = default_exefilter
        opts.log_info()
        log.output('Host: ' + host.label(mode = 'all'))
        debug_trace = opts.find_arg('--debug-trace')
        if debug_trace:
            if len(debug_trace) != 1:
                debug_trace = debug_trace[1]
            else:
                raise error.general('no debug flags, can be: console,gdb,output')
        else:
            debug_trace = ''
        opts.defaults['exe_trace'] = debug_trace
        job_trace = 'jobs' in debug_trace.split(',')
        rtems_tools = opts.find_arg('--rtems-tools')
        if rtems_tools:
            if len(rtems_tools) != 2:
                raise error.general('invalid RTEMS tools option')
            rtems_tools = rtems_tools[1]
        else:
            rtems_tools = '%{_prefix}'
        bsp = opts.find_arg('--rtems-bsp')
        if bsp is None or len(bsp) != 2:
            raise error.general('RTEMS BSP not provided or an invalid option')
        bsp = config.load(bsp[1], opts)
        bsp_config = opts.defaults.expand(opts.defaults['tester'])
        report_mode = opts.find_arg('--report-mode')
        if report_mode:
            if report_mode[1] != 'failures' and \
               report_mode[1] != 'all' and \
               report_mode[1] != 'none':
                raise error.general('invalid report mode')
            report_mode = report_mode[1]
        else:
            report_mode = 'failures'
        executables = find_executables(opts.params(), exe_filter)
        if len(executables) == 0:
            raise error.general('no executables supplied')
        start_time = datetime.datetime.now()
        total = len(executables)
        reports = report.report(total)
        reporting = 1
        jobs = int(opts.jobs(opts.defaults['_ncpus']))
        exe = 0
        finished = []
        if jobs > len(executables):
            jobs = len(executables)
        while exe < total or len(tests) > 0:
            if exe < total and len(tests) < jobs:
                tst = test_run(exe + 1, total, reports,
                               executables[exe],
                               rtems_tools, bsp, bsp_config,
                               opts)
                exe += 1
                tests += [tst]
                if job_trace:
                    _job_trace(tst, 'create', total, exe, tests, reporting)
                tst.run()
            else:
                dead = [t for t in tests if not t.is_alive()]
                tests[:] = [t for t in tests if t not in dead]
                for tst in dead:
                    if job_trace:
                        _job_trace(tst, 'dead', total, exe, tests, reporting)
                    finished += [tst]
                    tst.reraise()
                del dead
                if len(tests) >= jobs or exe >= total:
                    time.sleep(0.250)
                if len(finished):
                    reporting = report_finished(reports, report_mode,
                                                reporting, finished, job_trace)
        finished_time = datetime.datetime.now()
        reporting = report_finished(reports, report_mode,
                                    reporting, finished, job_trace)
        if reporting < total:
            log.warning('finished jobs does not match: %d' % (reporting))
            report_finished(reports, report_mode, -1, finished, job_trace)
        reports.summary()
        end_time = datetime.datetime.now()
        average_time = 'Average test time: %s' % (str((end_time - start_time) / total))
        total_time = 'Testing time     : %s' % (str(end_time - start_time))
        log.notice(average_time)
        log.notice(total_time)
        if mail is not None and output is not None:
            m_arch = opts.defaults.expand('%{arch}')
            m_bsp = opts.defaults.expand('%{bsp}')
            build = ' %s:' % (reports.get_config('build', not_found = ''))
            subject = '[rtems-test] %s/%s:%s %s' % (m_arch, m_bsp, build,
                                                    reports.score_card('short'))
            np = 'Not present in test'
            ver = reports.get_config('version', not_found = np)
            build = reports.get_config('build', not_found = np)
            tools = reports.get_config('tools', not_found = np)
            body = [total_time, average_time,
                    '', 'Host', '====', host.label(mode = 'all'),
                    '', 'Configuration', '=============',
                    'Version: %s' % (ver),
                    'Build  : %s' % (build),
                    'Tools  : %s' % (tools),
                    '', 'Summary', '=======', '',
                    reports.score_card(), '',
                    reports.failures(),
                    'Log', '===', ''] + output.get()
            mail.send(to_addr, subject, os.linesep.join(body))
    except error.general as gerr:
        print(gerr)
        sys.exit(1)
    except error.internal as ierr:
        print(ierr)
        sys.exit(1)
    except error.exit:
        sys.exit(2)
    except KeyboardInterrupt:
        if opts is not None and opts.find_arg('--stacktrace'):
            print('}} dumping:', threading.active_count())
            for t in threading.enumerate():
                print('}} ', t.name)
            print(stacktraces.trace())
        log.notice('abort: user terminated')
        killall(tests)
        sys.exit(1)
    finally:
        console.restore(stdtty)
    sys.exit(0)
def handle(self, *args, **options):
    from accounts.models import Patron, Address
    self.product_links = {}
    self.product_families = [
        # Alimenter - Stocker
        '78-alimenter-en-electricite',
        '79-cuve-fioul',
        '121-alimenter-en-air',
        # Chauffer - Assecher
        '81-assecher',
        '82-ventiler',
        '84-chauffer',
        # Compacter
        '85-terrassement',
        '86-mesurer',
        # Consommables
        '77-consommables?id_category=77&n=50',
        # Decorer - Entretenir - Souder
        '87-souder',
        '83-decouper',
        '123-decoration',
        '124-entretien',
        # Engins batiment - travaux publics
        '89-minipelle?id_category=89&n=20',
        '90-minichargeur',
        '91-dumper',
        '92-rouleau-vibrant',
        '93-plaque-vibrante',
        '130-accessoires-engins-de-travaux-publics?id_category=130&n=20',
        # Evenementiel
        '76-evenementiel',
        # Jardiner
        '94-nettoyer',
        '95-entretien?id_category=95&n=20',
        '96-tailler',
        '97-decouper',
        '98-preparer-la-terre',
        '132-accessoires-d-engins-de-travaux-public-et-terrassement',
        # Nettoyer - Decaper - Pomper
        '99-nettoyer?id_category=99&n=20',
        '100-pomper',
        '118-aspirer-souffler',
        '125-decaper',
        # Signaler - Securiser - Eclairer
        '101-securiser?id_category=101&n=20',
        '102-eclairer',
        '103-communiquer',
        '104-signaler?id_category=104&n=20',
        # Traiter le beton
        '143-casser?id_category=143&n=50',
        '133-decouper',
        '88-perforer',
        '134-poncer',
        '135-preparer-le-beton?id_category=135&n=50',
        '136-vibrer',
        '144-forer',
        # Traiter le bois
        '137-decouper',
        '138-fixer',
        '139-perforer',
        '140-poncer',
        '141-visser-devisser',
        # Transport - Lever - Manutentionner
        '105-manutentionner',
        '106-lever',
        '108-remorquer',
        '126-tirer',
        '145-accesoires-de-levage',
        # Travailler en equipe
        '109-echafaudage',
        '110-pir',
        '111-echelle',
        '112-tretaux',
    ]

    # Get the user
    try:
        self.patron = Patron.objects.get(username=self.username)
    except Patron.DoesNotExist:
        print "Can't find user 'Klm Location'"
        return

    self.address = self.patron.default_address or self.patron.addresses.all()[0]

    # self._subpage_crawler()
    # self._product_crawler()
    for i in xrange(self.thread_num):
        threading.Thread(target=self._subpage_crawler).start()
    for thread in threading.enumerate():
        if thread is not threading.currentThread():
            thread.join()

    # Create the products in the database
    for i in xrange(self.thread_num):
        threading.Thread(target=self._product_crawler).start()
    for thread in threading.enumerate():
        if thread is not threading.currentThread():
            thread.join()
# (fragment: tail of a Counter.increment() method)
        logging.debug('Waiting for lock')
        #self.lock.acquire()
        try:
            #logging.debug('Acquired lock')
            self.value = self.value + 1
        finally:
            pass
            #self.lock.release()

def worker(c):
    for i in range(2):
        pause = random.random()
        logging.debug('Sleeping %0.02f', pause)
        time.sleep(pause)
        c.increment()
        logging.debug("Counter is {}".format(c.value))
    logging.debug('Done')

counter = Counter()
for i in range(2):
    t = threading.Thread(target=worker, args=(counter,))
    t.start()

logging.debug('Waiting for worker threads')
main_thread = threading.currentThread()
for t in threading.enumerate():
    if t is not main_thread:
        t.join()
logging.debug('Counter: %d', counter.value)
class MyThread(threading.Thread):
    def __init__(self, name, delay):
        threading.Thread.__init__(self)
        self.name = name
        self.delay = delay

    def run(self):
        print('start thread')
        display(self.name, self.delay)
        print("end thread")

if __name__ == "__main__":
    t1 = MyThread('Thread1', 1)
    t2 = MyThread('Thread2', 2)
    t1.start()
    t2.start()
    print(t1.getName())
    print(t2.getName())
    print(threading.activeCount())
    print(threading.currentThread())
    print(threading.enumerate())
    t1.join()
    t2.join()
    print("Done")
def threads_info(q):
    q.put(len(threading.enumerate()))
    q.put(threading.current_thread().name)
def dump_thread_info():
    for t in threading.enumerate():
        print(t)
    print(run("ps aux | grep 'node\\|java\\|python'"))
def signals(signal, frame):
    print "Got signal, terminating"
    for thread in threading.enumerate():
        if thread.isAlive() and isinstance(thread, StoppableThread):
            thread.stopThread()
# The threading module provides thread-based parallelism.
#
# Basic usage: scheduling and locking
import threading

print("Default number of running threads: {}, threads: {}".format(
    threading.active_count(), threading.enumerate()))
for x in num:
    ConfigurationTest_Boolen = 0  # original had "==", a comparison with no effect
    thread_counter += 1
    if (thread_counter % 100) == 0:
        print(f"\n\n\nSleep {thread_counter}\n\n\n")
        time.sleep(20)
        print("\n\n\nAfter Sleep\n\n\n")
    print(f"\t\tWe are Processing this IP {x}\n")
    try:
        my_thread = threading.Thread(target=ConfigurationTest, args=(x, 0, 0))
        my_thread.start()
    except Exception:
        FailedExceptionIps.append(x)  # x is the IP itself, not an index into num

main_thread = threading.currentThread()
for some_thread in threading.enumerate():
    if some_thread != main_thread:
        print(some_thread)
        some_thread.join()

# loginandcopy('10.231.0.84','khyat','P@ssw0rd','a1.py','a1.py')
print("\nAfter Finishing operations on devices\n")

####################################################################################################
###################        Add new IPs and Remove Duplicated IPs        ###########################
####################################################################################################
num_New = list(dict.fromkeys(num_New))  # remove duplicated IPs
num = list(dict.fromkeys(num))          # remove duplicated IPs
if len(num_New) != 0:
    print("\n\t\tNew Discovered IPs from cdp neighbors in num_New")
    # print(num_New)
def download_and_play(url, file_name, download_path):
    # Launch the download thread
    logger.info("Active threads " + str(threading.active_count()))
    logger.info("" + repr(threading.enumerate()))
    logger.info("Starting download thread...")
    download_thread = DownloadThread(url, file_name, download_path)
    download_thread.start()
    logger.info("Download thread started")
    logger.info("Active threads " + str(threading.active_count()))
    logger.info("" + repr(threading.enumerate()))

    # Wait for the download
    logger.info("Waiting...")
    while True:
        cancelled = False
        dialog = xbmcgui.DialogProgress()
        dialog.create('Descargando...', 'Cierra esta ventana para empezar la reproducción')
        dialog.update(0)

        while not cancelled and download_thread.isAlive():
            dialog.update(
                download_thread.get_progress(),
                "Cancela esta ventana para empezar la reproducción",
                "Velocidad: " + str(int(download_thread.get_speed() / 1024)) +
                " KB/s " + str(download_thread.get_actual_size()) + "MB de " +
                str(download_thread.get_total_size()) + "MB",
                "Tiempo restante: " + str(
                    downloadtools.sec_to_hms(download_thread.get_remaining_time())))
            xbmc.sleep(1000)

            if dialog.iscanceled():
                cancelled = True
                break

        dialog.close()

        logger.info("End of waiting")

        # Launch the player
        player = CustomPlayer()
        player.set_download_thread(download_thread)
        player.PlayStream(download_thread.get_file_name())

        # Playback finished
        logger.info("Fin de reproducción")
        if player.is_stopped():
            logger.info("Terminado por el usuario")
            break
        else:
            if not download_thread.isAlive():
                logger.info("La descarga ha terminado")
                break
            else:
                logger.info("Continua la descarga")

    # When the player finishes, stop the download if it is still running
    logger.info("Download thread alive=" + str(download_thread.isAlive()))
    if download_thread.isAlive():
        logger.info("Killing download thread")
        download_thread.force_stop()
def get_active_threads():
    """Return the list of active thread objects"""
    return threading.enumerate()
def __del__(self):
    for thread in threading.enumerate():
        print(thread.name)
def main(*args):
    """Process command line arguments and invoke bot."""
    imagepage = None
    always = False
    category = u''
    delete_after_done = False
    # Load a lot of default generators
    local_args = pywikibot.handle_args(args)
    genFactory = pagegenerators.GeneratorFactory()

    for arg in local_args:
        if arg == '-always':
            always = True
        elif arg.startswith('-cc:'):
            category = arg[len('-cc:'):]
        elif arg == '-delete':
            delete_after_done = True
        else:
            genFactory.handleArg(arg)

    pregenerator = genFactory.getCombinedGenerator(preload=True)
    if not pregenerator:
        pywikibot.bot.suggest_help(missing_generator=True)
        return False

    for page in pregenerator:
        skip = False
        if page.exists() and (page.namespace() == 6) and (not page.isRedirectPage()):
            imagepage = pywikibot.FilePage(page.site, page.title())

            # First do autoskip.
            if doiskip(imagepage.get()):
                pywikibot.output("Skipping " + page.title())
                skip = True
            else:
                # The first upload is last in the list.
                try:
                    username = imagepage.getLatestUploader()[0]
                except NotImplementedError:
                    # No API, using the page file instead
                    (datetime, username, resolution, size,
                     comment) = imagepage.getFileVersionHistory().pop()
                if always:
                    newname = imagepage.title(withNamespace=False)
                    CommonsPage = pywikibot.Page(
                        pywikibot.Site('commons', 'commons'),
                        u'File:%s' % newname)
                    if CommonsPage.exists():
                        skip = True
                else:
                    while True:
                        # Do the TkdialogIC to accept/reject and change the name
                        (newname, skip) = TkdialogIC(
                            imagepage.title(withNamespace=False),
                            imagepage.get(), username,
                            imagepage.permalink(with_protocol=True),
                            imagepage.templates()).getnewname()

                        if skip:
                            pywikibot.output('Skipping this image')
                            break

                        # Did we enter a new name?
                        if len(newname) == 0:
                            # Take the old name
                            newname = imagepage.title(withNamespace=False)
                        else:
                            newname = newname.decode('utf-8')

                        # Check if the image already exists
                        CommonsPage = pywikibot.Page(
                            imagepage.site.image_repository(),
                            u'File:' + newname)
                        if not CommonsPage.exists():
                            break
                        else:
                            pywikibot.output(
                                'Image already exists, pick another name or '
                                'skip this image')
                        # We don't overwrite images; pick another name and go
                        # back to the start of the loop

            if not skip:
                imageTransfer(imagepage, newname, category,
                              delete_after_done).start()

    pywikibot.output('Still ' + str(threading.activeCount()) + ' active threads, lets wait')
    for openthread in threading.enumerate():
        if openthread != threading.currentThread():
            openthread.join()
    pywikibot.output(u'All threads are done')
def test_read_thread_safe(self):
    """
    Tests for race conditions. Reads the same waveform file n_threads
    (currently 30) times in parallel and compares the results, which
    must all be the same.
    """
    data = np.arange(0, 500)
    start = UTCDateTime(2009, 1, 13, 12, 1, 2, 999000)
    formats = _get_default_eps('obspy.plugin.waveform', 'writeFormat')
    for format in formats:
        # XXX: skip SEGY and SU formats for now as they need some special
        # headers.
        if format in ['SEGY', 'SU', 'SEG2']:
            continue
        dt = np.int_
        if format in ('MSEED', 'GSE2'):
            dt = np.int32
        tr = Trace(data=data.astype(dt))
        tr.stats.network = "BW"
        tr.stats.station = "MANZ1"
        tr.stats.location = "00"
        tr.stats.channel = "EHE"
        tr.stats.calib = 0.999999
        tr.stats.delta = 0.005
        tr.stats.starttime = start
        # create waveform file with given format and byte order
        with NamedTemporaryFile() as tf:
            outfile = tf.name
            tr.write(outfile, format=format)
            if format == 'Q':
                outfile += '.QHD'
            n_threads = 30
            streams = []
            timeout = 120
            if 'TRAVIS' in os.environ:
                timeout = 570  # 30 seconds under Travis' limit
            cond = threading.Condition()

            def test_functions(streams, cond):
                st = read(outfile, format=format)
                streams.append(st)
                with cond:
                    cond.notify()

            # Read the files at once and save the output in the just
            # created list.
            our_threads = []
            for _i in range(n_threads):
                thread = threading.Thread(target=test_functions,
                                          args=(streams, cond))
                thread.start()
                our_threads.append(thread)
            our_threads = set(our_threads)
            # Loop until all threads are finished.
            start = time.time()
            while True:
                with cond:
                    cond.wait(1)
                remaining_threads = set(threading.enumerate())
                if len(remaining_threads & our_threads) == 0:
                    break
                # Avoid infinite loop and leave after some time; such a
                # long time is needed for debugging with valgrind or Travis
                elif time.time() - start >= timeout:  # pragma: no cover
                    msg = 'Not all threads finished after %d seconds!' % (
                        timeout)
                    raise Warning(msg)
            # Compare all values which should be identical and clean up
            # files
            for st in streams:
                np.testing.assert_array_equal(st[0].data, tr.data)
            if format == 'Q':
                os.remove(outfile[:-4] + '.QBN')
                os.remove(outfile[:-4] + '.QHD')
def post_all(self, user_ids=None):
    """
    Post all profiles
    @user_ids list of str which are user ids like 'ad|test'

    Returns list of failed users (empty if no failure)
    """
    self.__deferred_init()
    qs = "/v2/user"
    threads = []
    failed_users = queue.Queue()

    logger.info("Received {} user profiles to post".format(len(self.profiles)))
    if user_ids is not None:
        logger.info(
            "Requesting a specific list of user_id's to post {} (total user_ids: {}, total profiles: {})".format(
                user_ids, len(user_ids), len(self.profiles)
            )
        )
        if not isinstance(user_ids, list):
            raise PublisherError("user_ids must be a list", user_ids)

        # List what to delete, then delete, instead of slower list-copy
        # operations or filters. This is because some data sets are huge
        # (GBs of data).
        xlist = []
        for idx, profile in enumerate(self.profiles):
            if profile.user_id.value is None:
                if self.known_cis_users_by_email.get(profile.primary_email.value) not in user_ids:
                    xlist.append(idx)
            elif profile.user_id.value not in user_ids:
                xlist.append(idx)
        for i in reversed(xlist):
            if self.profiles[i].active.value:
                del self.profiles[i]
        logger.info("After filtering, we have {} user profiles to post".format(len(self.profiles)))

    # XXX - we already validate in the API, is this needed?
    # self.validate()

    for profile in self.profiles:
        # If we have no user_id provided we need to find it here.
        # These are always considered updated users, not new users.
        if profile.user_id.value is None:
            user_id = self.known_cis_users_by_email[profile.primary_email.value]
        else:
            user_id = profile.user_id.value

        # Filter out non-updatable attributes as needed
        self.filter_known_cis_users(profiles=[profile])

        threads.append(threading.Thread(target=self._really_post,
                                        args=(user_id, qs, profile, failed_users)))
        threads[-1].start()
        num_threads = len(threading.enumerate())
        while num_threads >= self.max_threads:
            time.sleep(1)
            num_threads = len(threading.enumerate())
            logger.info("Too many concurrent threads, waiting a bit...")

    logger.debug("Waiting for threads to terminate...")
    for t in threads:
        t.join()

    logger.debug("Retrieving results from the queue...")
    ret = []
    while not failed_users.empty():
        ret.append(failed_users.get())
        failed_users.task_done()
    return ret
def load_honeypot_engine():
    """
    load OHP Engine

    Returns:
        True
    """
    # print logo
    logo()
    # parse argv
    parser, argv_options = argv_parser()

    #########################################
    # argv rules apply
    #########################################
    # check help menu
    if argv_options.show_help_menu:
        parser.print_help()
        exit_success()
    # check for requirements before start
    check_for_requirements(argv_options.start_api_server)
    # check api server flag
    if argv_options.start_api_server:
        start_api_server()
        exit_success()
    # check selected modules
    if argv_options.selected_modules:
        selected_modules = list(set(argv_options.selected_modules.rsplit(",")))
        if "all" in selected_modules:
            selected_modules = load_all_modules()
        if "" in selected_modules:
            selected_modules.remove("")
        # if selected modules are zero
        if not len(selected_modules):
            exit_failure(messages("en", "zero_module_selected"))
        # if module not found
        for module in selected_modules:
            if module not in load_all_modules():
                exit_failure(messages("en", "module_not_found").format(module))
    # check excluded modules
    if argv_options.excluded_modules:
        excluded_modules = list(set(argv_options.excluded_modules.rsplit(",")))
        if "all" in excluded_modules:
            exit_failure("you cannot exclude all modules")
        if "" in excluded_modules:
            excluded_modules.remove("")
        # remove excluded modules
        for module in excluded_modules:
            if module not in load_all_modules():
                exit_failure(messages("en", "module_not_found").format(module))
            # ignore if the module was not selected; it will be removed anyway
            try:
                selected_modules.remove(module)
            except Exception as _:
                del _
        # if selected modules are zero
        if not len(selected_modules):
            exit_failure(messages("en", "zero_module_selected"))
    virtual_machine_container_reset_factory_time_seconds = argv_options. \
        virtual_machine_container_reset_factory_time_seconds
    global verbose_mode
    verbose_mode = argv_options.verbose_mode
    run_as_test = argv_options.run_as_test
    #########################################
    # argv rules apply
    #########################################

    # build configuration based on selected modules
    configuration = honeypot_configuration_builder(selected_modules)

    info(messages("en", "honeypot_started"))
    info(messages("en", "loading_modules").format(", ".join(selected_modules)))

    # check for conflicts in real machine ports and pick new ports
    info("checking for conflicts in ports")
    configuration = conflict_ports(configuration)

    # stop old containers (in case they are not stopped)
    stop_containers(configuration)
    # remove old containers (in case they are not updated)
    remove_old_containers(configuration)
    # remove old images (in case they are not updated)
    remove_old_images(configuration)

    # create new images based on selected modules
    create_new_images(configuration)

    # create OWASP Honeypot networks in case they do not exist
    create_ohp_networks()
    # start containers based on selected modules
    configuration = start_containers(configuration)
    # start network monitoring thread
    new_network_events_thread = threading.Thread(
        target=new_network_events,
        args=(configuration,),
        name="new_network_events_thread")
    new_network_events_thread.start()
    info("all selected modules started: {0}".format(", ".join(selected_modules)))

    bulk_events_thread = threading.Thread(
        target=insert_bulk_events_from_thread,
        args=(),
        name="insert_events_in_bulk_thread")
    bulk_events_thread.start()

    # run module processors
    run_modules_processors(configuration)

    # check if it's not a test
    if not run_as_test:
        # wait forever (until the user sends ctrl + c to interrupt)
        wait_until_interrupt(
            virtual_machine_container_reset_factory_time_seconds,
            configuration,
            new_network_events_thread)

    # kill the network events thread
    terminate_thread(new_network_events_thread)
    terminate_thread(bulk_events_thread)
    insert_events_in_bulk()  # in case any events were not inserted from the thread

    # stop created containers
    stop_containers(configuration)
    # stop module processors
    stop_modules_processors(configuration)

    # remove created containers
    remove_old_containers(configuration)
    # remove created images
    remove_old_images(configuration)
    # remove_tmp_directories() error: access denied!

    # kill all missed threads
    for thread in threading.enumerate()[1:]:
        terminate_thread(thread, False)

    info("finished.")
    # reset cmd/terminal color
    finish()
    return True