def __init__(self, transferer):
    # Injected transfer mechanism
    self.transferer = transferer

    # Synchronization and tasks
    self.wait_change = queue.Queue()
    self.expect = 0
    self.closed = False

    # Maintain a set of running greenlets for gevent.killall.
    #
    # Abrupt termination of WAL-E (e.g. calling exit, as seen with
    # a propagated error) will not result in clean-ups
    # (e.g. 'finally' clauses) being run, so it's necessary to
    # retain the greenlets, inject asynchronous exceptions, and
    # then wait on termination.
    self.greenlets = set()
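# ---------------------------------------------------------------------------
# The comment above describes a teardown discipline: retain every spawned
# greenlet so it can be killed and joined explicitly. Below is a minimal,
# hedged sketch of that close path -- the 'TransferGroup' class and the
# 'start'/'close' names are assumptions for illustration, not WAL-E's actual
# implementation.
# ---------------------------------------------------------------------------
import gevent
import gevent.queue as queue


class TransferGroup(object):  # hypothetical host class for the __init__ above
    def __init__(self, transferer):
        self.transferer = transferer
        self.wait_change = queue.Queue()
        self.expect = 0
        self.closed = False
        self.greenlets = set()

    def start(self, func, *args):
        # Track the greenlet for later killall; drop it once it finishes.
        g = gevent.spawn(func, *args)
        self.greenlets.add(g)
        g.link(lambda glet: self.greenlets.discard(glet))
        return g

    def close(self):
        # Inject GreenletExit into each retained greenlet and block until
        # all have terminated, so their 'finally' clauses actually run.
        self.closed = True
        gevent.killall(list(self.greenlets), block=True)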
def test_waiters_that_cancel(self):
    q = queue.Queue()

    def do_receive(q, evt):
        with gevent.Timeout(0, RuntimeError()):
            try:
                result = q.get()
                evt.set(result)  # pragma: no cover (should have raised)
            except RuntimeError:
                evt.set('timed out')

    evt = AsyncResult()
    gevent.spawn(do_receive, q, evt)
    self.assertEqual(evt.get(), 'timed out')
    q.put('hi')
    self.assertEqual(q.get(), 'hi')
def main(self, name, mesh, host, port, parent):
    """."""
    state = 'unconnected'
    sock = None
    to = 1
    up = False
    dataq = queue.Queue()
    last_heard_from = None
    while True:
        if state == 'unconnected':
            if sock is None:
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.settimeout(0.1)
            try:
                sock.connect((host, port))
            except (socket.error, gevent.Timeout):
                to = to + random.random() * self.variance
            else:
                state = 'connected'
                receiver = gevent.spawn_link(self.recv, sock, dataq)
                sender = gevent.spawn_link(self.send, sock, dataq)
                to = 0
        to = to + random.random() * self.variance
        if up and state != 'connected':
            # The original check on last_heard_from is truncated in the
            # source; a syntactically valid placeholder is kept here.
            if last_heard_from:
                pass
        try:
            pat, msg = self.receive(self.DIE, self.DATA, timeout=to)
        except gevent.LinkedExited:
            # Our receiver died for some reason.
            state, sock = 'unconnected', None
            continue
        else:
            if pat is self.DIE:
                # The endgame. Someone told us to die.
                break
            elif pat is self.DATA:
                pass
def test_cq_getter_unstuck():
    cq = ClosableQueue(fuzz=0.01)
    trigger_close = queue.Queue()

    def blocked_reader():
        """
        Read from the queue when there is nothing to get; the reader
        shouldn't be stuck forever once the queue is closed.
        """
        trigger_close.put_nowait(1)
        cq.get()

    w = gevent.spawn(blocked_reader)
    trigger_close.get()
    cq.close()
    w.join()
def test_get_nowait_unlock(self):
    result = []
    q = queue.Queue(1)
    p = gevent.spawn(q.put, 5)

    def store_result(func, *args):
        result.append(func(*args))

    assert q.empty(), q
    gevent.sleep(0)
    assert q.full(), q
    get_hub().loop.run_callback(store_result, q.get_nowait)
    gevent.sleep(0)
    assert q.empty(), q
    assert result == [5], result
    assert p.ready(), p
    assert p.dead, p
    assert q.empty(), q
def listen(self, user, dbname=None):
    '''
    Listen for messages for the user and yield whenever something is there

    :param user: Id of user.
    :param dbname: Optionally specify the dbname, if the transaction
        context is not available
    '''
    if dbname is not None:
        q = self.store.setdefault(dbname, {}).setdefault(
            user, queue.Queue())
    else:
        q = self.get_queue(user)
    while True:
        try:
            yield q.get(timeout=5)
        except queue.Empty:
            yield '{}'
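# ---------------------------------------------------------------------------
# The 'listen' generator above is a long-poll loop: it blocks up to five
# seconds for a message and yields '{}' as a heartbeat when nothing arrives,
# keeping the response stream alive. A self-contained sketch of the same
# pattern under those assumptions (the standalone 'listen' below is
# illustrative, not the original class method):
# ---------------------------------------------------------------------------
import gevent
import gevent.queue as queue


def listen(q, timeout=5):
    """Yield messages as they arrive; yield '{}' as a keep-alive on silence."""
    while True:
        try:
            yield q.get(timeout=timeout)
        except queue.Empty:
            yield '{}'


q = queue.Queue()
gevent.spawn_later(0.1, q.put, '{"event": "ping"}')
stream = listen(q, timeout=0.5)
print(next(stream))  # '{"event": "ping"}'
print(next(stream))  # '{}' heartbeat after the 0.5 s timeout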
def test_two_bogus_waiters(self):
    def do_receive(q, evt):
        with gevent.Timeout(0, RuntimeError()):
            try:
                result = q.get()
                evt.set(result)
            except RuntimeError:
                evt.set('timed out')

    q = queue.Queue()
    e1 = AsyncResult()
    e2 = AsyncResult()
    gevent.spawn(do_receive, q, e1)
    gevent.spawn(do_receive, q, e2)
    gevent.sleep(0.1)
    q.put('sent')
    self.assertEqual(e1.get(), 'timed out')
    self.assertEqual(e2.get(), 'timed out')
    self.assertEqual(q.get(), 'sent')
def start_webdir_scan(self):
    output.debug(">> Start first web dir scan.....")
    #self._webdir_pool.map(self.__webdir_first_scan_schedu, ['/%s/' % str(dir_dic) for dir_dic in self.dictionary['dir_dic']])
    exist_dir_cache_que = queue.Queue()
    for dir_dic in self.dictionary['dir_dic']:
        self.webdir_pool.apply_async(
            self.__webdir_scan_schedu,
            args=(self.default_dir_webscan, '/%s/' % dir_dic,
                  exist_dir_cache_que))
    self.webdir_pool.join()
    self.__push_result2dir(exist_dir_cache_que,
                           [self.exist_dir_que, self.exist_dir_cache_que])
    output.debug(">> Start web dir scan....")
    try:
        while not self.exist_dir_cache_que.empty():
            dir_suffix = self.exist_dir_cache_que.get_nowait()
            output.debug("[+] found exist dir: %s" % dir_suffix)
            testwebscan = WebScan(self.requester, test_path=dir_suffix,
                                  suffix=None, bdir=True)
            for dir_dic in self.dictionary['dir_dic']:
                test_dir_dic = '%s%s/' % (dir_suffix, dir_dic)
                self.webdir_pool.apply_async(
                    self.__webdir_scan_schedu,
                    args=(testwebscan, test_dir_dic, exist_dir_cache_que))
            self.webdir_pool.join()
            self.__push_result2dir(
                exist_dir_cache_que,
                [self.exist_dir_que, self.exist_dir_cache_que])
    except queue.Empty:
        pass
    output.debug("[==] web dir scan over...")
    del self.exist_dir_cache_que
    del exist_dir_cache_que
def test_worker_in_out():
    @worker()
    def f(x):
        return x * x

    q_in = ClosableQueue(fuzz=0.01)
    q_out = ClosableQueue(fuzz=0.01)
    q_done = queue.Queue()
    for i in range(4):
        q_in.put(i)
    q_in.put(StopIteration)
    f(q_in, q_out, q_done)
    # Should have signaled done
    q_done.get_nowait()
    q_out.put(StopIteration)
    assert 3 * 3 + 2 * 2 + 1 == sum(i for i in q_out)
def test_two_bogus_waiters(self):
    def do_receive(q, evt):
        gevent.Timeout.start_new(0, RuntimeError())
        try:
            result = q.get()
            evt.set(result)
        except RuntimeError:
            evt.set('timed out')
        # XXX finally = timeout

    q = queue.Queue()
    e1 = AsyncResult()
    e2 = AsyncResult()
    gevent.spawn(do_receive, q, e1)
    gevent.spawn(do_receive, q, e2)
    gevent.sleep(0)
    q.put('sent')
    self.assertEqual(e1.get(), 'timed out')
    self.assertEqual(e2.get(), 'timed out')
    self.assertEqual(q.get(), 'sent')
def test_send(self):
    channel = queue.Queue(0)
    events = []

    def another_greenlet():
        events.append(channel.get())
        events.append(channel.get())

    g = gevent.spawn(another_greenlet)

    events.append('sending')
    channel.put('hello')
    events.append('sent hello')
    channel.put('world')
    events.append('sent world')

    self.assertEqual(
        ['sending', 'hello', 'sent hello', 'world', 'sent world'],
        events)
    g.get()
def test_put_nowait_unlock(self):
    result = []
    q = queue.Queue()
    p = gevent.spawn(q.get)

    def store_result(func, *args):
        result.append(func(*args))

    assert q.empty(), q
    assert not q.full(), q
    gevent.sleep(0.001)
    assert q.empty(), q
    assert not q.full(), q
    get_hub().loop.run_callback(store_result, q.put_nowait, 10)
    assert not p.ready(), p
    gevent.sleep(0.001)
    assert result == [None], result
    assert p.ready(), p
    assert not q.full(), q
    assert q.empty(), q
def __init__(self, uuid, obj_dict=None):
    self.uuid = uuid
    self.virtual_networks = set()
    self.logical_routers = set()
    self.bgp_router = None
    self.config_manager = None
    self.nc_q = queue.Queue(maxsize=1)
    self.vn_ip_map = {'irb': {}, 'lo0': {}}
    self.config_sent = False
    self.init_cs_state()
    self.update(obj_dict)
    plugin_params = {"physical_router": self}
    self.config_manager = DeviceConf.plugin(self.vendor, self.product,
                                            plugin_params, self._logger)
    if self.config_manager:
        self.set_conf_sent_state(False)
    self.config_repush_interval = PushConfigState.get_repush_interval()
    self.nc_handler_gl = vnc_greenlets.VncGreenlet("VNC Device Manager",
                                                   self.nc_handler)
    self.uve_send()
def __init__(self, uploader, max_concurrency,
             max_members=tar_partition.PARTITION_MAX_MEMBERS):
    # Injected upload mechanism
    self.uploader = uploader

    # Concurrency maximums
    self.max_members = max_members
    self.max_concurrency = max_concurrency

    # Current concurrency burden
    self.member_burden = 0

    # Synchronization and tasks
    self.wait_change = queue.Queue(maxsize=0)
    self.closed = False

    # Used for both synchronization and measurement.
    self.concurrency_burden = 0
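# ---------------------------------------------------------------------------
# "Used for both synchronization and measurement" above suggests a burden
# accounting scheme: each worker bumps the burden before starting and posts
# to 'wait_change' when done, so the coordinator can block while the ceiling
# is reached. A hedged sketch of that idea -- 'UploadGroup', 'put', and
# '_run' are hypothetical names, not WAL-E's actual code.
# ---------------------------------------------------------------------------
import gevent
import gevent.queue as queue


class UploadGroup(object):
    def __init__(self, max_concurrency):
        self.max_concurrency = max_concurrency
        self.concurrency_burden = 0
        self.wait_change = queue.Queue(maxsize=0)

    def put(self, upload_fn, *args):
        # Block until there is room under the concurrency ceiling.
        while self.concurrency_burden >= self.max_concurrency:
            self.wait_change.get()
        self.concurrency_burden += 1
        gevent.spawn(self._run, upload_fn, *args)

    def _run(self, upload_fn, *args):
        try:
            upload_fn(*args)
        finally:
            # Release the slot and wake a waiting coordinator.
            self.concurrency_burden -= 1
            self.wait_change.put(None)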
def __init__(self, id=None, key=None, servicepool=None, **kwargs):
    j.baseclasses.object.__init__(self)
    if self.__class__._MODEL is None:
        self.__class__._MODEL = j.world.system._bcdb.model_get(
            schema=self.__class__._SCHEMA_TXT)
    self.actions = {}
    self._state = None
    self._state_last_save_hash = None  # to see when there is change
    self._data = None
    self._id = id
    self._key = None
    self._running = None
    self.action_queue = queue.Queue()
    # 'topclass' is presumably supplied by the surrounding framework;
    # it is not defined in this snippet.
    if topclass:
        self._init()
        self._init_pre(**kwargs)
    if id is None and (key is not None or servicepool is not None):
        if servicepool is not None and key is None:
            self.error_bug_raise(
                "key needs to be specified when servicepool is given")
        # need to instantiate, because data given which needs to be remembered
        if servicepool is not None:
            key = "%s__%s" % (servicepool, key)
        else:
            if key.find("__") != -1:
                self.error_bug_raise("__ should not be in keyname")
        self._key = key
        self._data  # will fetch the key
    self._redis_key_state = self.key.encode() + b":state"
    self._redis_key_actions_now = b"actions:last"
    self._running = None
def test_put_nowait_unlock(self):
    result = []
    q = queue.Queue(0)
    p = gevent.spawn(q.get)

    def store_result(func, *args):
        result.append(func(*args))

    assert q.empty(), q
    assert q.full(), q
    gevent.sleep(0)
    assert q.empty(), q
    assert q.full(), q
    core.active_event(store_result,
                      util.wrap_errors(Exception, q.put_nowait), 10)
    assert not p.ready(), p
    gevent.sleep(0)
    assert result == [None], result
    assert p.ready(), p
    assert q.full(), q
    assert q.empty(), q
def __init__(self, conf_dict):
    """
    Init
    :param conf_dict: dict
    :type conf_dict: dict
    """
    # Lock for acquire/release
    self.pool_lock = Lock()

    # Store
    self.conf_dict = conf_dict

    # Max size
    self.max_size = self.conf_dict.get("pool_max_size", 10)

    # Alloc
    self.pool = queue.Queue(maxsize=self.max_size)

    # Init
    self.size = 0
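# ---------------------------------------------------------------------------
# The __init__ above sets up a lazily-filled connection pool: a bounded
# queue of idle connections plus a size counter guarded by a lock. A minimal
# sketch of the acquire/release side this implies -- 'acquire', 'release',
# and '_create_connection' are hypothetical names, not the library's API,
# and Lock is assumed to come from threading (gevent-aware when
# monkey-patched).
# ---------------------------------------------------------------------------
import gevent.queue as queue
from threading import Lock


class LazyPool(object):
    def __init__(self, conf_dict):
        self.pool_lock = Lock()
        self.conf_dict = conf_dict
        self.max_size = self.conf_dict.get("pool_max_size", 10)
        self.pool = queue.Queue(maxsize=self.max_size)
        self.size = 0

    def acquire(self):
        # Reuse an idle connection if one is queued.
        try:
            return self.pool.get_nowait()
        except queue.Empty:
            pass
        # Otherwise create a new one, up to max_size.
        with self.pool_lock:
            if self.size < self.max_size:
                self.size += 1
                return self._create_connection()
        # Pool exhausted: wait for a connection to be released.
        return self.pool.get()

    def release(self, conn):
        # Hand the connection back for reuse.
        self.pool.put_nowait(conn)

    def _create_connection(self):
        raise NotImplementedError  # backend-specific factory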
def _app():
    class Broker:
        def subscribe(self, subscriber):
            for idx, _ in enumerate(LIFECYCLE_EVENTS):
                subscriber.put(event(idx))
            subscriber.put(StopIteration)

        def unsubscribe(self, queue):
            queue.put(StopIteration)

    app = vadvisor.app.rest.app
    broker = Broker()
    app.eventBroker = broker
    app.eventStore = InMemoryStore()
    q = queue.Queue()
    broker.subscribe(q)
    for element in q:
        app.eventStore.put(element)
    return app
def test_two_waiters_one_dies(self):
    def waiter(q, evt):
        evt.set(q.get())

    def do_receive(q, evt):
        with gevent.Timeout(0, RuntimeError()):
            try:
                result = q.get()
                evt.set(result)
            except RuntimeError:
                evt.set('timed out')

    q = queue.Queue()
    dying_evt = AsyncResult()
    waiting_evt = AsyncResult()
    gevent.spawn(do_receive, q, dying_evt)
    gevent.spawn(waiter, q, waiting_evt)
    gevent.sleep(0.1)
    q.put('hi')
    self.assertEqual(dying_evt.get(), 'timed out')
    self.assertEqual(waiting_evt.get(), 'hi')
def check_proxy(proxies):
    # Check which of these IPs already exist in the database
    exists = [
        r['ip'] for r in coll.find(
            {"ip": {"$in": list(set([t['ip'] for t in proxies]))}},
            {'ip': 1})
    ]
    spawns = []
    q = queue.Queue()
    for p in proxies:
        if p['ip'] not in exists:
            spawns.append(gevent.spawn(baidu_check, p, q))
    print('new ip num:%s' % len(spawns))
    # Check network availability
    MAX_CURRENT_NUM = 20
    t0 = time.time()
    while True:
        if len(spawns) > MAX_CURRENT_NUM:
            gevent.joinall(spawns[:MAX_CURRENT_NUM])
            spawns = spawns[MAX_CURRENT_NUM:]
        else:
            gevent.joinall(spawns)
            break
    print(time.time() - t0)
    ok_ps = []
    while True:
        try:
            p = q.get_nowait()
            ok_ps.append(p)
        except Exception as e:
            print(str(e))
            break
    print('ok ip num:%s' % len(ok_ps))
    return ok_ps
def __init__(self, connectionstring, poolsize, modulename='pyodbc'):
    conns = self.conns = [
        DBConnection_(socket_.socketpair()) for _ in xrange(poolsize)
    ]
    tp = self.threadpool = ThreadPool(poolsize)
    q = self.queue = queue.Queue(poolsize)
    # Check if the module imports submodules, e.g. mysql-connector-python
    # needs "mysql.connector".
    modulename_parts = modulename.split(".")
    # The normal pyodbc/sqlite modules have a single part.
    if len(modulename_parts) == 1:
        module = __import__(modulename)
    # mysql-connector-python uses "mysql.connector" as the imported module.
    elif len(modulename_parts) == 2:
        fromlist = [str(modulename_parts[1])]
        module = __import__(modulename, fromlist=fromlist)
    for conn in conns:
        tp.spawn(self.worker, conn)
        conn.connect(connectionstring, module)
        q.put(conn)

    class Exc(object):
        pass

    self.exc = Exc
    for e in _exceptions:
        setattr(Exc, e, getattr(module, e))
    # Special treatment for mysql-connector-python: instead of the module's
    # "pyformat" paramstyle, use the quote paramstyle, as it is compatible
    # without extending the db_adapter.
    if modulename == "mysql.connector":
        self.paramstyle = "quote"
    else:
        self.paramstyle = module.paramstyle
def __init__(self, socket, LAST_MESSAGES, rabbitcox, monitoring):
    (self.info_ip, self.info_port) = socket.getpeername()

    # Logger named after the peer's ip and port
    self.logger = logging.getLogger('radiovisserver.stompserver.%s.%s'
                                    % (self.info_ip, self.info_port))

    self.socket = socket

    # Buffer for incoming data
    self.incomingData = ''

    # Topics the client subscribed to
    self.topics = []

    # Queue of messages
    self.queue = queue.Queue(maxsize=250)

    # Lock to send frames
    self.lock = RLock()

    # Mapping channel -> id for subscriptions
    self.idsByChannels = {}

    # Mapping id -> channel for subscriptions
    self.channelsByIds = {}

    # Last messages
    self.LAST_MESSAGES = LAST_MESSAGES

    # RabbitCox
    self.rabbitcox = rabbitcox

    # Monitoring
    self.monitoring = monitoring

    # Create a session id
    self.session_id = str(uuid.uuid4())

    # Station id, if authenticated
    self.station_id = None

    # True if threads should be stopped
    self.sucide = False
def setup_worker_listener(addrs, tq, rq):
    wsock = ctx.socket(zmq.ROUTER)
    wsock.bind(addrs['workers'])

    readyq = queue.Queue()
    compcache = {}

    @spawn
    def send_work():
        for addr, task in tq:
            print ' >', ' '.join(addr)
            if task.hash not in compcache:
                try:
                    rsp = Renderer.compile(task.anim, arch='sm_35')
                except:
                    # Store exceptions, so that we don't try to endlessly
                    # recompile bad genomes
                    import traceback
                    rsp = traceback.format_exc()
                    print 'Error while compiling task:', rsp
                compcache[task.hash] = rsp
            else:
                rsp = compcache[task.hash]
            if isinstance(rsp, basestring):
                continue
            packer, lib, cubin = rsp
            ctask = FullTask(addr, task, cubin, packer)
            worker_addr = readyq.get()
            wsock.send_multipart([worker_addr, '', pickle.dumps(ctask)])

    @spawn
    def read_rsps():
        while True:
            rsp = wsock.recv_multipart(copy=False)
            if rsp[2].bytes != '':
                print '< ', rsp[2].bytes, rsp[3].bytes
                rq.put(rsp[2:])
            readyq.put(rsp[0])
def test_max_size(self):
    q = queue.Queue(2)
    results = []

    def putter(q):
        q.put('a')
        results.append('a')
        q.put('b')
        results.append('b')
        q.put('c')
        results.append('c')
        return "OK"

    p = gevent.spawn(putter, q)
    gevent.sleep(0)
    self.assertEqual(results, ['a', 'b'])
    self.assertEqual(q.get(), 'a')
    gevent.sleep(0)
    self.assertEqual(results, ['a', 'b', 'c'])
    self.assertEqual(q.get(), 'b')
    self.assertEqual(q.get(), 'c')
    assert p.get(timeout=0) == "OK"
def __init__(self, name=None, binding=None, **kwargs):
    """
    Initializer for a recv channel.

    You may set the receiving name and binding here if you wish,
    otherwise they will be set when you call setup_listener.
    """
    self._recv_queue = gqueue.Queue()

    # set recv name and binding if given
    assert name is None or isinstance(name, tuple)
    self._recv_name = name
    self._recv_binding = binding

    self._setup_listener_called = False

    BaseChannel.__init__(self, **kwargs)

    # setup RecvChannel specific state transitions
    self._fsm.add_transition(self.I_START_CONSUME, self.S_ACTIVE,
                             self._on_start_consume, self.S_CONSUMING)
    self._fsm.add_transition(self.I_STOP_CONSUME, self.S_CONSUMING,
                             self._on_stop_consume, self.S_ACTIVE)
    self._fsm.add_transition(self.I_CLOSE, self.S_CONSUMING,
                             self._on_close_while_consume, self.S_CLOSED)
def test_basexmlstream():
    q = queue.Queue()

    def expected(event, elem=None):
        e, el = q.get(block=False)
        assert e == event
        if elem is not None:
            assert element_eq(el, elem)

    class TestStream(BaseXMLStream):
        def handle_stream_start(self, elem):
            q.put(("stream_start", elem))

        def handle_element(self, elem):
            q.put(("element", elem))

        def handle_parse_error(self, exc_type, exc_value, exc_traceback):
            raise exc_value.with_traceback(exc_traceback)

        def handle_stream_end(self):
            q.put(("stream_end", None))

        def handle_close(self):
            q.put(("close", None))

    t = TestStream()
    t._feed("<stream key='value'>")
    expected("stream_start", etree.Element("stream", {"key": "value"}))

    e = etree.Element("message", {})
    b = etree.Element("body", {})
    b.text = "foobar"
    e.append(b)
    t._feed("<message><body>foobar</body></message>")
    expected("element", e)

    t._feed("</stream>")
    expected("stream_end")

    with pytest.raises(etree.XMLSyntaxError, match="[eE]xtra content"):
        t._feed("</stream>")
    expected("close")
def __init__(self, config):
    self.config = config
    self.emails = queue.Queue()
    self.adapters = {}

    # pushbank logger
    current_thread().name = 'MAIN'
    root_logger = logging.getLogger()
    root_logger.level = config.log_level
    log_formatter = logging.Formatter(
        "%(asctime)s [%(levelname)-5.5s] [%(threadName)s] %(message)s")
    file_handler = logging.FileHandler('tmp/stdout.log')
    file_handler.setFormatter(log_formatter)
    root_logger.addHandler(file_handler)
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(log_formatter)
    root_logger.addHandler(console_handler)

    school = config.SCHOOL
    if not util.verify(school):
        print u"Please enter your student ID information correctly."
        sys.exit(1)
    s = util.get_semester()['semester']
    if s == "1R":
        print u"The current semester is the first semester."
    else:
        print u"The current semester is the second semester."

    # log in to sugang server
    self.login(school)

    # connect smtp server
    self.connected = connect_mail(server=config.EMAIL['SMTP_SERVER'],
                                  port=config.EMAIL['SMTP_PORT'],
                                  user=config.EMAIL['SMTP_USER'],
                                  passwd=config.EMAIL['SMTP_PASSWORD'],
                                  tls=config.EMAIL['SMTP_TLS'])
def chat_room(request, chat_room_id):
    """
    After picking a chat room, add the user to the subscribers and add
    the chat room to the profile.
    """
    try:
        chatroom = ChatRoom.objects.get(pk=chat_room_id)
        user = Profile.objects.get(user=request.user)
        user.chat_room_name = chatroom.name
        user.save()
        active = Profile.objects.filter(chat_room_name=chatroom.name)
        context = {
            'chatroom': chatroom,
            'subs': active,
            'rooms': chatroom.name,
        }
        if request.user.profile:
            chatroom.add_subscriber(request.user.profile)
            QUEUES[chatroom.name][request.user.username] = queue.Queue()
        return render(request, 'chat/chat_room.html', context)
    except ChatRoom.DoesNotExist:
        return redirect(reverse('four'))
def start_webfileext_scan(self):
    '''
    Brute-force backup copies of script files,
    e.g. /help/a.php -> /help/a.php.bak
    :return:
    '''
    output.warning(">> Start file ext scan....")
    exist_webfileext_que = queue.Queue()
    try:
        while not self.exist_file_cache_que.empty():
            testwebscan, exist_file = self.exist_file_cache_que.get_nowait()
            try:
                ext_pos = exist_file.rindex(".")
                if exist_file[ext_pos:] in ['.php', '.asp', '.jsp',
                                            '.jspx', '.aspx']:
                    for bakfile_ext in self.dictionary['bakfile_exts']:
                        self.webfile_pool.apply_async(
                            self.__webfile_scan_schedu,
                            args=(testwebscan,
                                  '%s%s' % (exist_file, bakfile_ext),
                                  exist_webfileext_que))
                    self.webfile_pool.join()
                    self.__push_result2file(
                        exist_webfileext_que,
                        [self.exist_file_cache_que, self.exist_sensi_que])
            except ValueError:
                pass
    except queue.Empty:
        pass
    del exist_webfileext_que
    del self.exist_file_cache_que
def __init__(self, config, passphrases):
    self.passphrases = passphrases

    ##
    # Stats
    ##

    # How many successful GETs have we had?
    self.hits = 0
    # How many unsuccessful GETs have we had? (not counting pending jobs)
    self.misses = 0
    # How many uploads have we had?
    self.uploads = 0

    # Mapping of file hashes to gevent Events
    self.pending = {}

    self.load_config(config)

    self.messages = queue.Queue()

    # Start our message handling loop
    self._message_loop_thread = gevent.spawn(self.process_messages)

    # Start our cleanup loop
    self._cleanup_loop_thead = gevent.spawn(self.cleanup_loop)
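# ---------------------------------------------------------------------------
# The 'pending' mapping above (file hash -> gevent Event) is a common
# de-duplication pattern: the first getter registers an Event, later getters
# wait on it rather than re-fetching. A hedged sketch of that pattern --
# 'Cache', 'fetch', and '_download' are hypothetical names, not this
# project's actual API.
# ---------------------------------------------------------------------------
import gevent
from gevent.event import Event


class Cache(object):
    def __init__(self):
        self.pending = {}  # file hash -> Event for an in-flight download
        self.store = {}    # file hash -> cached bytes

    def fetch(self, file_hash):
        if file_hash in self.store:
            return self.store[file_hash]
        if file_hash in self.pending:
            # Another greenlet is already downloading this blob; wait for it.
            self.pending[file_hash].wait()
            return self.store.get(file_hash)
        evt = self.pending[file_hash] = Event()
        try:
            self.store[file_hash] = self._download(file_hash)
        finally:
            # Wake all waiters whether or not the download succeeded.
            evt.set()
            del self.pending[file_hash]
        return self.store.get(file_hash)

    def _download(self, file_hash):
        gevent.sleep(0.01)  # stand-in for real network I/O
        return b"blob-" + file_hash.encode()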