def test__count(self):
    """Test the _thread._count() function: the count must rise by one while
    a started thread is alive and return to the baseline once it exits."""
    orig = thread._count()
    mut = thread.allocate_lock()
    mut.acquire()
    started = []

    def task():
        started.append(None)
        mut.acquire()
        mut.release()

    with support.wait_threads_exit():
        thread.start_new_thread(task, ())
        while not started:
            time.sleep(POLL_SLEEP)
        self.assertEqual(thread._count(), orig + 1)
        # Allow the task to finish.
        mut.release()
        # The only reliable way to be sure that the thread ended from the
        # interpreter's point of view is to wait for the function object to be
        # destroyed.
        done = []
        wr = weakref.ref(task, lambda _: done.append(None))
        del task
        while not done:
            time.sleep(POLL_SLEEP)
            # BUG FIX: force a collection each poll. On interpreters without
            # reference counting (PyPy, GraalPy, ...) the weakref callback
            # only fires after a GC cycle, so without this the loop can spin
            # forever and the final count may still lag behind.
            support.gc_collect()
        self.assertEqual(thread._count(), orig)
def wait_threads_exit(timeout=None):
    """
    bpo-31234: Context manager to wait until all threads created in the with
    statement exit.

    Use _thread.count() to check if threads exited. Indirectly, wait until
    threads exit the internal t_bootstrap() C function of the _thread module.

    threading_setup() and threading_cleanup() are designed to emit a warning
    if a test leaves running threads in the background. This context manager
    is designed to cleanup threads started by the _thread.start_new_thread()
    which doesn't allow to wait for thread exit, whereas thread.Thread has a
    join() method.
    """
    # NOTE(review): this is a generator documented as a context manager, but
    # no @contextlib.contextmanager decorator is visible in this snippet —
    # confirm it is applied at the definition site.
    if timeout is None:
        timeout = support.SHORT_TIMEOUT
    # Baseline thread count taken before the body runs; threads started
    # inside the `with` block raise _thread._count() above this value.
    old_count = _thread._count()
    try:
        yield
    finally:
        start_time = time.monotonic()
        # Poll (error=False: sleeping_retry() itself never raises) until the
        # count falls back to the baseline, collecting garbage each round so
        # thread objects kept alive only via cycles get reclaimed.
        for _ in support.sleeping_retry(timeout, error=False):
            support.gc_collect()
            count = _thread._count()
            if count <= old_count:
                break
        else:
            # Retry loop exhausted without hitting `break`: report how many
            # threads are still alive and fail the test.
            dt = time.monotonic() - start_time
            msg = (f"wait_threads() failed to cleanup {count - old_count} "
                   f"threads after {dt:.1f} seconds "
                   f"(count: {count}, old count: {old_count})")
            raise AssertionError(msg)
def test__count(self):
    """Check that _thread._count() rises while a worker runs and falls back
    to its starting value after the worker exits."""
    baseline = thread._count()
    gate = thread.allocate_lock()
    gate.acquire()
    has_started = []

    def worker():
        has_started.append(None)
        gate.acquire()
        gate.release()

    with threading_helper.wait_threads_exit():
        thread.start_new_thread(worker, ())
        # Poll until the worker reports in.
        while not has_started:
            time.sleep(POLL_SLEEP)
        self.assertEqual(thread._count(), baseline + 1)
        # Unblock the worker so it can run to completion.
        gate.release()
        # Waiting for the function object to be destroyed is the only
        # reliable sign that the interpreter considers the thread finished.
        finished = []
        wr = weakref.ref(worker, lambda _: finished.append(None))
        del worker
        while not finished:
            time.sleep(POLL_SLEEP)
            support.gc_collect()  # For PyPy or other GCs.
        self.assertEqual(thread._count(), baseline)
def test__count(self):
    # Test the _count() function.
    orig = thread._count()
    mut = thread.allocate_lock()
    mut.acquire()
    started = []

    def task():
        started.append(None)
        mut.acquire()
        mut.release()

    with support.wait_threads_exit():
        thread.start_new_thread(task, ())
        while not started:
            time.sleep(POLL_SLEEP)
        self.assertEqual(thread._count(), orig + 1)
        # Allow the task to finish.
        mut.release()
        # The only reliable way to be sure that the thread ended from the
        # interpreter's point of view is to wait for the function object to be
        # destroyed.
        done = []
        wr = weakref.ref(task, lambda _: done.append(None))
        del task
        # while not done: # Truffle change
        # Truffle change: instead of waiting for the weakref callback on
        # `task`, poll thread._count() directly with a 30-second deadline —
        # presumably because this GC does not collect `task` promptly
        # (TODO confirm against upstream Truffle/GraalPy patch).
        deadline = time.monotonic() + 30.0
        while thread._count() != orig and time.monotonic() < deadline:
            time.sleep(POLL_SLEEP)
        self.assertEqual(thread._count(), orig)
def test_thread_count(self):
    """_thread._count() must go from 0 to 1 when one thread is started."""
    import _thread
    import time
    signalled = []
    release = []

    def child():
        signalled.append(42)
        self.waitfor(lambda: release)

    assert _thread._count() == 0
    _thread.start_new_thread(child, ())
    self.waitfor(lambda: signalled)
    assert _thread._count() == 1
    release.append(1)  # let the child thread return
def periodic(lists, page=1):
    # Process one "page" of work items: fan each item out to a worker
    # thread, wait for all workers to finish, then gather the per-row
    # results from `worker_pool` and write them into an .xlsx file.
    # NOTE(review): source formatting was collapsed; indentation below is a
    # best-effort reconstruction — verify against the original file.
    global worker_pool
    print(f'=======PAGE {page} STARTED========')
    workbook = xlsxwriter.Workbook(f'{DIRNAME}/temp/{FILENAME}{page}.xlsx')
    worksheet = workbook.add_worksheet()
    row = 1
    while len(lists) > 0:
        # Throttle: don't let the live thread count reach WORKER.
        while _thread._count() >= WORKER:
            time.sleep(0.2)
        val = lists.pop()
        _thread.start_new_thread(worker, (val, row, page))
        time.sleep(0.1)
        if len(lists) > 0:
            row += 1
    print('Collecting all workers data')
    # Wait until every worker thread has exited before reading worker_pool.
    while _thread._count() > 1:
        time.sleep(0.1)
    print('Writing to disk...')
    # Collect every key seen in the workers' result dicts for this page.
    head_set = []
    for i in worker_pool.get(page):
        if len(i) == 2:
            for j in i[1].keys():
                head_set.append(j)
    head_set = list(set(head_set))
    # Header row: each 'Imagelink'-named key gets its own column, shifting
    # subsequent headers right by `offside`.
    offside = 0
    for i, val in enumerate(head_data):
        if 'Imagelink' in val:
            for j in head_set:
                if 'Imagelink' in j:
                    worksheet.write(0, i + offside, 'Imagelink')
                    offside += 1
        else:
            # NOTE(review): this branch writes at column i, ignoring
            # `offside` — looks like it can overwrite the expanded
            # Imagelink columns; confirm intended.
            worksheet.write(0, i, val)
    # Data rows: each worker_pool entry is (row_index, data_dict).
    for i in worker_pool.get(page):
        if len(i) == 2:
            offside = 0
            for j, val in enumerate(head_data):
                if 'Imagelink' in val:
                    temp = [k for k in i[1].keys() if 'Imagelink' in k]
                    for k in temp:
                        worksheet.write(i[0], j + offside, i[1].get(k, ''))
                        offside += 1
                elif type(i[1].get(val, '')) is not list:
                    worksheet.write(i[0], j + offside, i[1].get(val, ''))
                else:
                    # List values are not written to the sheet, only logged.
                    print(val, i[1].get(val, ''))
    workbook.close()
    print(f"====PAGE {page} DONE====")
def threading_cleanup(*original_values):
    # Wait (up to ~1 s: 100 polls x 10 ms) for the thread count and the
    # dangling-thread set to return to the snapshot captured by
    # threading_setup(); emit a warning once if they differ on first check.
    _MAX_COUNT = 100
    for count in range(_MAX_COUNT):
        values = _thread._count(), threading._dangling
        if values == original_values:
            break
        if not count:
            # Display a warning at the first iteration
            support.environment_altered = True
            dangling_threads = values[1]
            support.print_warning(f"threading_cleanup() failed to cleanup "
                                  f"{values[0] - original_values[0]} threads "
                                  f"(count: {values[0]}, "
                                  f"dangling: {len(dangling_threads)})")
            # NOTE: the loop variable shadows any module-level `thread` name
            # for the remainder of this function.
            for thread in dangling_threads:
                support.print_warning(f"Dangling thread: {thread!r}")
            # Don't hold references to threads
            dangling_threads = None
        values = None
        # Give the leaked threads a chance to exit, and collect garbage so
        # dead thread objects are actually reclaimed before the next poll.
        time.sleep(0.01)
        support.gc_collect()
def threading_cleanup(nb_threads):
    """Poll until the live thread count equals *nb_threads*.

    Sleeps 0.1 s between polls and gives up silently after ten attempts.
    Does nothing when the _thread module is unavailable (module name falsy).
    """
    if not _thread:
        return
    attempts_left = 10
    while attempts_left > 0:
        if _thread._count() == nb_threads:
            return
        time.sleep(0.1)
        attempts_left -= 1
def entr():
    # Send-button handler: if the socket is closed, offer to reconnect;
    # otherwise pickle the text-box contents and send them over the socket.
    global s
    if s._closed:
        print(_thread._count())
        if askyesno("Reconnect", "Connection closed! Do you want to reconnect?"):
            _thread.start_new_thread(listen, ())
    else:
        # NOTE(review): collapsed formatting makes this `else` pairing
        # ambiguous; reconstructed as the branch of `if s._closed` (send
        # only while the socket is open) — confirm against the original.
        s.send(pickle.dumps(textinputbox.get(index1="1.0", index2=END)))
def incre(a):
    """Increment a.i ten times, each increment guarded by the shared lock,
    then print the current thread count."""
    for _ in range(10):
        lock.acquire()
        print(a.i, "INC")
        a.i += 1
        lock.release()
    print(thread._count())
def ffunc(q, *a):
    # Fuzz/stress helper: exercise a few _thread APIs, then randomly push
    # and pop items on the queue-like object `q`, and return random bytes.
    #
    # q: queue.Queue-like object (empty/qsize/put/put_nowait/get/get_nowait)
    # *a: extra positional arguments, ignored.
    _thread.get_ident()
    _thread.stack_size()
    local = _thread._local()  # instantiate a thread-local; otherwise unused
    try:
        q.empty()
        q.qsize()
        ln = rnd.randint(0, 99)
        # Push `ln` items, choosing put vs put_nowait at random each time.
        for _ in range(ln):
            rnd.choice([q.put, q.put_nowait])(fitem())
        # Pop the same number back, mixing blocking and non-blocking gets.
        for _ in range(ln):
            if fbool():
                q.get(fbool(), rnd.random())
            else:
                q.get_nowait()
    except ReferenceError:
        # Presumably `q` can be a weak proxy whose referent dies mid-run;
        # swallowing that is fine for fuzzing purposes — TODO confirm.
        pass
    return list(os.urandom(rnd.randint(0, 99)))
def periodic(lists, page=1):
    """Process one "page" of work items.

    Writes a header row, fans each item in `lists` out to a worker thread
    (throttled to fewer than WORKER live threads), then waits for all
    workers to exit before closing the workbook.

    lists: mutable list of work items; consumed (pop) by this function.
    page:  page number used for the output file name and passed to workers.
    """
    print(f'=======PAGE {page} STARTED========')
    workbook = xlsxwriter.Workbook(f'{DIRNAME}/temp/{FILENAME}{page}.xlsx')
    # NOTE(review): the worksheet object is shared with worker threads;
    # xlsxwriter objects are not documented as thread-safe — confirm.
    worksheet = workbook.add_worksheet()
    row = 1
    # Header row.
    for i, val in enumerate(head_data):
        worksheet.write(0, i, val)
    while len(lists) > 0:
        # Throttle: wait while WORKER or more threads are alive.
        while _thread._count() >= WORKER:
            time.sleep(1)
        val = lists.pop()
        _thread.start_new_thread(worker, (val, worksheet, row, page))
        time.sleep(1)
        if len(lists) > 0:
            row += 1
    # BUG FIX: wait for every worker thread to exit before closing the
    # workbook. Previously close() could run while workers were still
    # writing rows, losing data / corrupting the file. (Mirrors the sibling
    # periodic() variant that collects workers before writing.)
    while _thread._count() > 1:
        time.sleep(0.1)
    workbook.close()
    print(f"====PAGE {page} DONE====")
def on_click_run(self):
    """Run the selected post-processing script once per selected list entry.

    For each selected index, builds a command line from the entry's
    simulation-info file and the UI fields, then launches it via os.system
    on a background thread, throttled to 5 concurrent threads.
    """
    indexes = self.listbox.selectedIndexes()
    for item in indexes:
        i = item.row()
        print('Running command {} out of {} ({:0.2f}%)'.format(
            i, len(indexes), (i * 100) / len(indexes)))
        args = files_io.load_line(self.list_dir_names[i] +
                                  'simulation-info.txt')
        # The two characters after the last 's' in the variant name encode
        # the shaper id.
        j = self.list_vnames[i].rfind('s')
        args += ' --shaper=' + self.list_vnames[i][j + 1:j + 3]
        cmd = 'python3.5 ' + self.selected_script + \
            ' --inputdir=' + self.list_dir_names[i] + ' --outputdir=' + self.list_dir_names[i] + 'post/ ' + \
            ' --basedir=' + self.base_dir.text() + ' ' + \
            args + ' ' + self.textbox_args_additional.text()
        try:
            launch_thread = True
            if launch_thread:
                _thread.start_new_thread(os.system, (cmd, ))
                print('Thread launched with command: ' + cmd)
                sleep(0.1)
                # Throttle: how many threads in parallel (usually the number
                # of cores, unless the thread called is multi-thread already).
                while _thread._count() >= 5:
                    # BUG FIX: was sleep(0.e1), i.e. sleep(0.0) — a
                    # CPU-burning busy-wait. Sleep a real interval instead.
                    sleep(0.1)
            else:
                print('Wait for command execution: ' + cmd)
                os.system(cmd)
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt.
            print('Error running command')
def main(file, port=3389, workers=4001):
    """this def gon read the file and call the workers"""
    # Reads IP ranges ("start-end", one per line) from *file* and starts a
    # Process_port worker thread per IP, capped at *workers* live threads.
    # NOTE(review): collapsed formatting; indentation reconstructed.
    # The parameter `file` is rebound first to the file object, then to its
    # list of lines.
    with open(file, "r") as file:  # opeing the file
        file = file.readlines()  # reading all lines in file
        for ranges in file:  # reading file line by line
            if ranges.strip("\n"):  # removing empty line form file
                """Converting lines to ips and ips to range """
                IpStartBase = ranges.strip().split("-")[0]
                IpEndBase = ranges.strip().split("-")[1]
                StartIp = ipaddress.IPv4Address(IpStartBase)
                Endip = ipaddress.IPv4Address(IpEndBase)
                # NOTE(review): range() excludes int(Endip), so the end
                # address itself is never scanned — confirm intended.
                for ip in range(int(StartIp), int(Endip)):  # Looping from ranges of ip
                    try:
                        """"Checking if we have more then SetLimet threads and if not start the new thread"""
                        while True:
                            if len(threading.enumerate()) < workers:
                                thread_ = Process_port(ip, port, thread._count())
                                thread_.daemon = True
                                thread_.start()
                                thread_list.append(thread_)
                                break
                    except KeyboardInterrupt:  # if users Interrupt the cod do this
                        init()  # we gon call init object form Colorama so we can use windows don't filter ANSI
                        print(Fore.RED + "Look Like We are done Hare!")
                        quit()  # quit
                        # NOTE(review): unreachable after quit(); placement
                        # of this `break` is ambiguous in the collapsed
                        # source — confirm.
                        break  # break the loop
    for t in thread_list:
        """ Joining threads together """
        t.join()
def monitor():
    # Background heartbeat loop: compute a 0..1 load factor from the
    # pending-task queue and the live thread count, store it (with a
    # timestamp) in the na_heart collection, and refresh the global config.
    global PASSWORD_DIC, THREAD_COUNT, TIMEOUT, WHITE_LIST
    while True:
        # Tasks queued (status 0) and not yet scheduled (plan 0).
        queue_count = na_task.find({"status": 0, "plan": 0}).count()
        if queue_count:
            load = 1
        else:
            ac_count = thread._count()
            # 6 is presumably the scanner's own housekeeping threads,
            # subtracted from the count — TODO confirm.
            load = float(ac_count - 6) / THREAD_COUNT
        # Clamp load into [0, 1].
        if load > 1:
            load = 1
        if load < 0:
            load = 0
        na_heart.update(
            {"name": "load"},
            {"$set": {
                "value": load,
                "up_time": datetime.datetime.now()
            }})
        # Reload runtime configuration every cycle.
        PASSWORD_DIC, THREAD_COUNT, TIMEOUT, WHITE_LIST = get_config()
        # Poll faster while there is work in flight.
        if load > 0:
            time.sleep(8)
        else:
            time.sleep(60)
# Main dispatch loop: start the housekeeping threads, then pull tasks from
# the queue forever, fanning each target out to a vulscan worker thread,
# throttled by THREAD_COUNT.
# NOTE(review): `PLUGIN_DB.keys()[0]` indexes dict keys — Python 2 only;
# under Python 3 this raises TypeError.
PASSWORD_DIC, THREAD_COUNT, TIMEOUT, WHITE_LIST = get_config()
thread.start_new_thread(monitor, ())
thread.start_new_thread(kp_check, ())
thread.start_new_thread(kp_update, ())
while True:
    try:
        task_id, task_plan, task_target, task_plugin = queue_get()
        if task_id == '':
            # Queue empty: back off before polling again.
            time.sleep(10)
            continue
        if PLUGIN_DB:
            del sys.modules[PLUGIN_DB.keys()[0]]  # clear the plugin module cache
            PLUGIN_DB.clear()
        for task_netloc in task_target:
            # Wait for a free worker slot, then dispatch this target.
            while True:
                if int(thread._count()) < THREAD_COUNT:
                    # Whitelisted hosts are skipped entirely.
                    if task_netloc[0] in WHITE_LIST:
                        break
                    try:
                        thread.start_new_thread(
                            vulscan, (task_id, task_netloc, task_plugin))
                    except Exception as e:
                        print(e)
                    break
                else:
                    time.sleep(2)
        if task_plan == 0:
            # One-shot task: mark it finished (status 2).
            na_task.update({"_id": task_id}, {"$set": {"status": 2}})
    except Exception as e:
        print(e)
# _thread.start_new_thread(crawl, (url,)) page = PAGE_START status = True CRAWLER_TYPE = 'shop' def url_detection(url): if re.match(r'(www\.|.*)alibaba\.com/products/', url): return 'general' return 'shop' while status or _thread._count() > 0: if PAGES is None: pass elif page > PAGES: status = False continue url_parsed = parse.urlparse(url) params = parse.parse_qs(url_parsed.query) if url_detection(url) == 'general': params = [f'{i}={(params.get(i)[0])}' for i in params.keys()] params = "&".join(params) _target = f'{url_parsed.scheme}://{url_parsed.netloc}{url_parsed.path}?{params}' else: params = [f'{i}={(params.get(i)[0])}' for i in params.keys()] params = "&".join(params) _page = f'/productlist-{page}.html'
cv2.putText( frame, str("Tiempo abierto: " + str(tiempoOjoAbierto)), (320, 350), font, 0.4, (255, 255, 255), 1, cv2.LINE_AA) cv2.putText( frame, str("Tiempo cerrado: " + str(tiempoOjoCerrado)), (320, 370), font, 0.4, (255, 255, 255), 1, cv2.LINE_AA) data = { "abierto": tiempoOjoAbierto, "cerrado": tiempoOjoCerrado } data_json = json.dumps(data) print(_thread._count()) if (_thread._count() > 4): continue else: _thread.start_new_thread(enviarDatos, (data_json, )) else: xaP = [] xa += 1 cv2.imshow("frame", frame) key = cv2.waitKey(1) & 0xFF if key == ord("q"): cv2.destroyAllWindows() vs.stop()
# NOTE(review): the statements above `def decre` look like the tail of an
# `incre(a)` counterpart whose `def` line is missing from this snippet; they
# reference `a`, `lock` and `thread`, which are not defined at this level.
q = 0
while q < 10:
    q += 1
    lock.acquire()
    print(a.i, "INC")
    a.i += 1
    lock.release()
print(thread._count())


def decre(a):
    """Decrement a.i ten times, each step guarded by the shared lock."""
    # while a.i < 10:
    q = 0
    while q < 10:
        # BUG FIX: was `q -= 1`, which moves q away from 10 and never
        # terminates; mirror incre() and count ten iterations.
        q += 1
        lock.acquire()
        print(a.i, "DEC")
        a.i -= 1
        lock.release()


obj = a1()
thread.start_new_thread(incre, (obj, ))
thread.start_new_thread(decre, (obj, ))
# Busy-wait until the worker threads change the count away from 1, then
# busy-print until it returns to 1 — TODO confirm the intended conditions;
# _count() semantics here depend on how many threads are already running.
while thread._count() == 1:
    pass
while thread._count() != 1:
    print(thread._count())
def threading_setup():
    """Snapshot the current threading state.

    Returns a 2-tuple of the live _thread count and a copy of
    threading._dangling, suitable for later comparison by a matching
    cleanup routine.
    """
    active = _thread._count()
    dangling = threading._dangling.copy()
    return active, dangling
def threading_setup():
    """Return a 1-tuple snapshot of the live _thread count.

    When the _thread module is unavailable (module-level name is falsy),
    a placeholder count of 1 is returned instead.
    """
    count = _thread._count() if _thread else 1
    return (count,)