def test_if_pipe_blocks(self): r, w = self.pipe() # set nbytes such that for sure it is > maximum pipe buffer nbytes = 1000000 block = six.b('x') * 4096 buf = buffer(block) # Lack of "nonlocal" keyword in Python 2.x: bytesread = [0] byteswritten = [0] def produce(): while byteswritten[0] != nbytes: bytesleft = nbytes - byteswritten[0] byteswritten[0] += self.write(w, buf[:min(bytesleft, 4096)]) def consume(): while bytesread[0] != nbytes: bytesleft = nbytes - bytesread[0] bytesread[0] += len(self.read(r, min(bytesleft, 4096))) producer = Greenlet(produce) producer.start() consumer = Greenlet(consume) consumer.start_later(1) # If patching was not succesful, the producer will have filled # the pipe before the consumer starts, and would block the entire # process. Therefore the next line would never finish. joinall([producer, consumer]) assert bytesread[0] == nbytes assert bytesread[0] == byteswritten[0]
class StreamFactory(object):
    """Manages a greenlet that consumes a Twitter stream and republishes
    each tweet on a redis channel.

    :param auth: Twitter auth object passed to ``TwitterStream``.
    :param redis: redis client used to publish tweets.
    :param chan: redis channel name to publish on.
    :param filters: keyword arguments for ``statuses.filter()``.
    """

    def __init__(self, auth, redis, chan, filters):
        self.auth = auth
        self.redis = redis
        self.chan = chan
        self.filters = filters
        self.g = None  # the running greenlet, or None when stopped

    def start(self):
        """Start the streaming greenlet and return it.

        Raises RuntimeError if a greenlet is already running.
        """
        if self.g is not None:
            # BUGFIX: raising a plain string is a TypeError in Python 3 and
            # was deprecated in Python 2; raise a real exception instead.
            raise RuntimeError("A greenlet is already there")
        self.g = Greenlet(self._start)
        self.g.start()
        return self.g

    def kill(self):
        """Stop the streaming greenlet if one is running (idempotent)."""
        if self.g is not None:
            self.g.kill()
            self.g = None

    def restart(self):
        """Stop any running greenlet and start a fresh one."""
        self.kill()
        self.start()

    def _start(self):
        # Blocking stream loop; runs inside the greenlet.
        for tweet in TwitterStream(auth=self.auth).statuses.filter(**self.filters):
            # XXX: have to dump tweet, redis seems to do not know how to dump it itself.
            self.redis.publish(self.chan, json.dumps(tweet))
def _run_impl(self):
    """Run the sync greenlet and babysit it until it finishes or shutdown is set."""
    sync = Greenlet(retry_with_logging, self.sync,
                    account_id=self.account_id, logger=self.log)
    sync.start()
    while not sync.ready():
        if self.shutdown.is_set():
            # Ctrl-c, basically!
            self.log.info('Stopping sync', email=self.email_address,
                          account_id=self.account_id)
            # Make sure the parent can't start/stop any folder monitors
            # first
            sync.kill(block=True)
            return self._cleanup()
        else:
            # Poll for shutdown once per heartbeat.
            sleep(self.heartbeat)
    if sync.successful():
        return self._cleanup()
    # We just want the name of the exception so don't bother with
    # sys.exc_info()
    self.log.error('mail sync should run forever',
                   provider=self.provider_name,
                   account_id=self.account_id,
                   exception=type(sync.exception).__name__)
    raise sync.exception
def ws(self):
    """Handle a websocket request: drive the ws4py read loop in one greenlet
    and an interval sender in another, returning when both finish."""
    # open websocket
    websock = request.environ["ws4py.websocket"]
    # websock_version = request.environ['wsgi.websocket_version']
    # sec_websocket_extensions = request.environ.get('HTTP_SEC_WEBSOCKET_EXTENSIONS')
    # sec_websocket_key = request.environ.get('HTTP_SEC_WEBSOCKET_KEY')
    # sec_websocket_version = request.environ.get('HTTP_SEC_WEBSOCKET_VERSION')
    endpoint = websock.sock.getpeername()
    # print 'connection established with endpoint %s:%s, version %s / %s, key %s, extensions %s' \
    #     % (endpoint[0], endpoint[1], websock_version, sec_websocket_version, sec_websocket_key, sec_websocket_extensions)
    from gevent import Greenlet
    # Run the ws4py frame-pumping loop in its own greenlet.
    g1 = Greenlet(websock.run)
    g1.start()
    websock.send("Hello dear Browser! I'll send you redis stuff when I get some")
    g2 = Greenlet(send_stuff_in_intervals, websock)
    # g2 = Greenlet(send_redis_stuff, websock)
    g2.start()
    # Wait for the sender to finish, then for the socket loop to close.
    g2.join()
    g1.join()
    print "connection closed to %s:%s" % endpoint
def _test_rbc2(N=4, f=1, leader=None, seed=None):
    """Reliable-broadcast test: run N parties, crash up to f, and check the
    survivors all deliver the leader's message."""
    # Crash up to f nodes
    #if seed is not None: print 'SEED:', seed
    sid = 'sidA'
    # Deterministic randomness for router behavior and leader choice.
    rnd = random.Random(seed)
    router_seed = rnd.random()
    if leader is None:
        leader = rnd.randint(0, N-1)
    sends, recvs = simple_router(N, seed=router_seed)
    threads = []
    leader_input = Queue(1)
    for i in range(N):
        # Only the leader gets an input source; everyone else broadcasts None.
        input = leader_input.get if i == leader else None
        t = Greenlet(reliablebroadcast, sid, i, N, f, leader, input,
                     recvs[i], sends[i])
        t.start()
        threads.append(t)

    m = "Hello!asdfasdfasdfasdfasdfsadf"
    leader_input.put(m)
    gevent.sleep(0)  # Let the leader get out its first message

    # Crash f of the nodes
    crashed = set()
    #print 'Leader:', leader
    for _ in range(f):
        i = rnd.choice(range(N))
        crashed.add(i)
        threads[i].kill()
        threads[i].join()
    #print 'Crashed:', crashed
    gevent.joinall(threads)
    # Every surviving node must have delivered the broadcast value.
    for i, t in enumerate(threads):
        if i not in crashed:
            assert t.value == m
def setup():
    """ setup xAAL Engine & Device. And start it in a Greenlet"""
    helpers.setup_console_logger()
    global monitor
    engine = Engine()
    cfg = tools.load_cfg(PACKAGE_NAME)
    if not cfg:
        # First run: create and persist a default config file.
        logger.info('Missing config file, building a new one')
        cfg = tools.new_cfg(PACKAGE_NAME)
        cfg.write()
    # Describe this device to the xAAL network.
    dev = Device("hmi.basic")
    dev.address = cfg['config']['addr']
    dev.vendor_id = "IHSEV"
    dev.product_id = "WEB Interface"
    dev.version = 0.1
    dev.info = "%s@%s" % (PACKAGE_NAME, platform.node())
    engine.add_device(dev)
    db_server = None
    if 'db_server' in cfg['config']:
        db_server = cfg['config']['db_server']
    else:
        logger.info('You can set "db_server" in the config file')
    # Global monitor tracks network devices; events go to event_handler.
    monitor = Monitor(dev, filter_func=monitor_filter, db_server=db_server)
    monitor.subscribe(event_handler)
    engine.start()
    # Run the engine loop cooperatively in a greenlet.
    green_let = Greenlet(xaal_loop, engine)
    green_let.start()
def setup_server(grid):
    '''sets up the server for transmitting grid info'''
    class BIServ(zerorpc.Server):
        # Each RPC method simply proxies the corresponding grid accessor.
        def get_box_pts(self):
            return grid.get_box_pts()
        def get_goal_pts(self):
            return grid.get_goal_pts()
        def get_obs_pts(self):
            return grid.get_obs_pts()
        def get_flock_pts(self):
            return grid.get_flock_pts()
        def get_flock1(self):
            return grid.get_flock1()
        def get_flock2(self):
            return grid.get_flock2()
    srv = BIServ()
    # Listen on all interfaces, port 4242.
    srv.bind("tcp://0.0.0.0:4242")
    # Serve in a greenlet so the caller keeps control of the main flow.
    g = Greenlet(srv.run)
    g.start()
    return srv
def start_worker_with_task(config, processed_task_queue, task, worker_pool):
    """
    Create a worker greenlet to execute ``task``.

    :param config: configuration
    :type config: Config
    :param processed_task_queue: queue of processed tasks
    :type processed_task_queue: gevent_queue.Queue
    :param task: task to execute
    :type task: Task
    :param worker_pool: pool the worker is added to
    :type worker_pool: gevent.pool.Pool
    """
    worker = Greenlet(
        notification_worker,
        task,
        processed_task_queue,
        timeout=config.HTTP_CONNECTION_TIMEOUT,
        verify=False
    )
    # Add to the pool before starting so the pool can track/kill it.
    worker_pool.add(worker)
    worker.start()
def make_new_tcp_server(self, data, reply_address):
    """Spawn a greenlet running a new TCP server described by ``data`` and
    report the outcome back to ``reply_address`` over our UDP socket.

    On failure the reply is a JSON error object with the exception text
    base64-encoded in 'msg'.
    """
    DBG("NetoolCommandServer make_new_tcp_server %r" % data)
    try:
        g = Greenlet(start_tcp_server, data)
        g.start()
        result = AsyncResult()
        # Register the greenlet so it can be looked up/killed by name later.
        conn_greenlets[data['name']] = {
            'greenlet': g,
            'result': result,
            'data': None
        }
        # Block until the server greenlet reports startup success/failure.
        re = result.get()
        value = re['result']
        if value != 'ok':
            # Startup failed: drop the registration.
            del conn_greenlets[data['name']]
        re = self.socket.sendto(json.dumps(re), reply_address)
        DBG("sent %d" % re)
    except Exception:
        DBG_TRACE()
        # BUGFIX: b64encode() requires bytes, not an exception object;
        # stringify the exception first (and decode for JSON-serializable str).
        self.socket.sendto(
            json.dumps({
                'result': 'error',
                'msg': base64.b64encode(
                    str(sys.exc_info()[1]).encode()).decode()
            }), reply_address)
def _run_impl(self):
    """Run the mail sync greenlet, processing inbox commands until it exits."""
    sync = Greenlet(retry_and_report_killed, self.sync,
                    account_id=self.account_id, logger=self.log,
                    fail_classes=self.retry_fail_classes)
    sync.start()
    while not sync.ready():
        try:
            cmd = self.inbox.get_nowait()
            if not self.process_command(cmd):
                # ctrl-c, basically!
                self.log.info("Stopping sync", email=self.email_address)
                # make sure the parent can't start/stop any folder monitors
                # first
                sync.kill(block=True)
                self.folder_monitors.kill()
                return
        except Empty:
            # No pending command; yield for one heartbeat.
            sleep(self.heartbeat)
    if sync.successful():
        self.folder_monitors.kill()
        return
    # We just want the name of the exception so don't bother with
    # sys.exc_info()
    self.log.error('mail sync should run forever',
                   provider=self.provider_name,
                   account_id=self.account_id,
                   exception=type(sync.exception).__name__)
    raise sync.exception
def start_server(self):
    """Accept TCP connections forever, handling each request in a greenlet
    (or inline when self.no_greenlet is set)."""
    self.check_old_files(self.root_dir)
    self.start_folder_watcher()
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow quick restarts on the same port.
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind((self.ip_address, self.port_listen))
    while 1:
        try:
            s.listen(1)
            conn, addr = s.accept()
            data = conn.recv(8192)
            if not self.no_greenlet:
                g = Greenlet(self.handle_request, data, conn, addr, self.t_tracker)
                g.start()
            else:
                # NOTE(review): the inline path omits self.t_tracker — confirm intentional.
                self.handle_request(data, conn, addr)
        except KeyboardInterrupt:
            print "^C detected"
            s.close()
            break
        except SystemExit:
            break
        except Exception, e:
            # Log and keep serving on any other error.
            print e, 'p'
            traceback.print_exc(file=sys.stdout)
def _run_impl(self):
    """Run the mail sync greenlet, processing inbox commands until it exits."""
    sync = Greenlet(retry_and_report_killed, self.sync,
                    account_id=self.account_id, logger=self.log)
    # Report when the sync stops, however it stops.
    sync.link_value(lambda _: report_stopped(account_id=self.account_id))
    sync.start()
    while not sync.ready():
        try:
            cmd = self.inbox.get_nowait()
            if not self.process_command(cmd):
                # ctrl-c, basically!
                self.log.info("Stopping sync", email=self.email_address)
                # make sure the parent can't start/stop any folder monitors
                # first
                sync.kill(block=True)
                self.folder_monitors.kill()
                return
        except Empty:
            # No pending command; yield for one heartbeat.
            sleep(self.heartbeat)
    if sync.successful():
        self.folder_monitors.kill()
        return
    self.log.error("mail sync should run forever",
                   provider=self.provider_name,
                   account_id=self.account_id)
    raise sync.exception
class MongoThread(object):
    """A thread, or a greenlet, that uses a Connection"""

    def __init__(self, test_case):
        # Mirror the test case's configuration and shared connection.
        self.use_greenlets = test_case.use_greenlets
        self.connection = test_case.c
        self.db = self.connection[DB]
        self.ut = test_case
        self.passed = False  # set True only if run_mongo_thread completes

    def start(self):
        # Choose the concurrency primitive per configuration.
        if self.use_greenlets:
            self.thread = Greenlet(self.run)
        else:
            self.thread = threading.Thread(target=self.run)
        self.thread.start()

    def join(self):
        self.thread.join()
        self.thread = None

    def run(self):
        self.run_mongo_thread()
        # No exceptions thrown
        self.passed = True

    def run_mongo_thread(self):
        # Subclasses implement the actual workload.
        raise NotImplementedError()
def toBeScheduled():
    """Launch one honestParty greenlet per node, feed them the transaction
    set, and wait for consensus to finish (with manual exception handling)."""
    for i in iterList:
        bc = bcList[i]  # makeBroadcast(i)
        sd = sdList[i]
        recv = servers[0].get
        th = Greenlet(honestParty, i, N, t, controlChannels[i], bc, recv, sd,
                      options.B)
        # Tag the greenlet for debugging/inspection tools.
        th.parent_args = (N, t)
        th.name = 'client_test_freenet.honestParty(%d)' % i
        controlChannels[i].put(('IncludeTransaction', transactionSet))
        th.start()
        mylog('Summoned party %i at time %f' % (i, time.time()), verboseLevel=-1)
        ts.append(th)

    try:
        gevent.joinall(ts)
    except ACSException:
        gevent.killall(ts)
    except finishTransactionLeap:  ### Manually jump to this level
        print 'msgCounter', msgCounter
        print 'msgTypeCounter', msgTypeCounter
        # message id 0 (duplicated) for signatureCost
        logChannel.put(StopIteration)
        mylog("=====", verboseLevel=-1)
        for item in logChannel:
            mylog(item, verboseLevel=-1)
        mylog("=====", verboseLevel=-1)
    except gevent.hub.LoopExit:  # Manual fix for early stop
        # Keep the process alive and report per-greenlet exceptions.
        while True:
            gevent.sleep(1)
            checkExceptionPerGreenlet()
    finally:
        print "Consensus Finished"
class WebSocketClient(WebSocketBaseClient):
    """ws4py client that runs its read loop in a greenlet and exposes
    received messages through a queue-based receive() API."""

    def __init__(self, url, protocols=None, extensions=None):
        WebSocketBaseClient.__init__(self, url, protocols, extensions)
        self._th = Greenlet(self.run)  # read-loop greenlet, started on handshake
        self.messages = Queue()        # inbound message buffer

    def handshake_ok(self):
        # Handshake complete: start pumping frames.
        self._th.start()

    def received_message(self, message):
        # Copy because ws4py may reuse the message object.
        self.messages.put(copy.deepcopy(message))

    def closed(self, code, reason=None):
        # When the connection is closed, put a StopIteration
        # on the message queue to signal there's nothing left
        # to wait for
        self.messages.put(StopIteration)

    def receive(self):
        # If the websocket was terminated and there are no messages
        # left in the queue, return None immediately otherwise the client
        # will block forever
        if self.terminated and self.messages.empty():
            return None
        message = self.messages.get()
        if message is StopIteration:
            return None
        return message
class Channel(object):
    """Bidirectional message channel over a (gevent-friendly) websocket.

    A reader greenlet parses incoming frames onto ``incoming``; a writer
    greenlet flattens messages from ``outgoing`` onto the socket.
    """

    def __init__(self, socket):
        self.socket = socket
        self.running = False
        self.incoming = Queue(None)   # parsed messages received
        self.outgoing = Queue(None)   # messages waiting to be sent
        self.reader = Greenlet(self.do_read)
        self.writer = Greenlet(self.do_write)

    def do_read(self):
        # Pump the socket until it closes or the channel is stopped.
        while self.running:
            data = self.socket.receive()
            if not data:
                break
            self.incoming.put(parse_message(data))

    def do_write(self):
        while self.running:
            msg = self.outgoing.get()
            self.socket.send(flatten_message(msg))

    def is_running(self):
        # BUGFIX: was `def is_running():` (missing self) and referenced a
        # misspelled `self.writier` attribute.
        return self.running and not any(
            [self.reader.ready(), self.writer.ready()])

    def run(self):
        """Mark the channel live and start both pump greenlets."""
        self.running = True
        self.reader.start()
        self.writer.start()

    def wait(self):
        """Stop the channel and kill both pump greenlets."""
        self.running = False
        gevent.killall([self.reader, self.writer])

    def receive(self):
        """Block until a parsed incoming message is available."""
        return self.incoming.get()

    def send(self, type_, data):
        """Queue a message of the given type for transmission."""
        return self.outgoing.put(message(type_, data))

    def send_ping(self):
        return self.send('ping', time.time())

    def send_spawn(self, avatar):
        return self.send('spawn', avatar.stat())

    def send_die(self, avatar):
        return self.send('die', avatar.uid)

    def send_state(self, avatars):
        return self.send('state', [i.stat() for i in avatars])

    def send_update(self, avatar):
        return self.send('update', avatar.stat())
def _test_if_pipe_blocks(self, buffer_class): r, w = self.pipe() # set nbytes such that for sure it is > maximum pipe buffer nbytes = 1000000 block = b'x' * 4096 buf = buffer_class(block) # Lack of "nonlocal" keyword in Python 2.x: bytesread = [0] byteswritten = [0] def produce(): while byteswritten[0] != nbytes: bytesleft = nbytes - byteswritten[0] byteswritten[0] += self.write(w, buf[:min(bytesleft, 4096)]) def consume(): while bytesread[0] != nbytes: bytesleft = nbytes - bytesread[0] bytesread[0] += len(self.read(r, min(bytesleft, 4096))) producer = Greenlet(produce) producer.start() consumer = Greenlet(consume) consumer.start_later(1) # If patching was not succesful, the producer will have filled # the pipe before the consumer starts, and would block the entire # process. Therefore the next line would never finish. joinall([producer, consumer]) self.assertEqual(bytesread[0], nbytes) self.assertEqual(bytesread[0], byteswritten[0])
def _run_impl(self):
    """Run the mail sync greenlet, processing inbox commands until it exits."""
    sync = Greenlet(retry_and_report_killed, self.sync,
                    account_id=self.account_id, logger=self.log)
    sync.start()
    while not sync.ready():
        try:
            cmd = self.inbox.get_nowait()
            if not self.process_command(cmd):
                # ctrl-c, basically!
                self.log.info("Stopping sync", email=self.email_address)
                # make sure the parent can't start/stop any folder monitors
                # first
                sync.kill(block=True)
                self.folder_monitors.kill()
                return
        except Empty:
            # No pending command; yield for one heartbeat.
            sleep(self.heartbeat)
    if sync.successful():
        self.folder_monitors.kill()
        return
    self.log.error("mail sync should run forever",
                   provider=self.provider_name,
                   account_id=self.account_id)
    raise sync.exception
def thrFunc(s):
    """Print start/end markers around a 2s sleep and a nested greenlet run."""
    print "%s start.\n" %s
    gevent.sleep(2)
    # Run subFunc in a child greenlet and wait for it to finish.
    gThr = Greenlet(subFunc)
    gThr.start()
    gThr.join()
    print "%s end.\n" %s
def thrFunc(s):
    """Print start/end markers around a 2s sleep and a nested greenlet run."""
    print "%s start.\n" % s
    gevent.sleep(2)
    # Run subFunc in a child greenlet and wait for it to finish.
    gThr = Greenlet(subFunc)
    gThr.start()
    gThr.join()
    print "%s end.\n" % s
class Watchdog(object):
    """
    Runs a callable in a gevent greenlet, and restarts the greenlet
    with the same callable iff any exception is raised from the greenlet.

    Uses exponential backoff to respawn the greenlet until eventually
    it gives up.
    """

    def __init__(self, func, backoff=None):
        if not callable(func):
            raise ValueError('Func argument is not callable')
        if not backoff:
            backoff = TimeSensitiveBackoff()
        self.func = func
        self.backoff = backoff

    def __call__(self, greenlet):
        # Invoked as the link_exception callback when the watched greenlet dies.
        try:
            logger.exception(greenlet.exception)
            # NOTE(review): time.sleep blocks unless gevent has monkey-patched
            # it — confirm this is intended in this deployment.
            time.sleep(self.backoff.next())
            greenlet = self.respawn()
            return greenlet
        except StopIteration:
            # Backoff iterator exhausted: give up and terminate the process.
            _failhard('backoff exceeded', self.func, greenlet)
            get_hub().parent.throw(SystemExit())

    def spawn(self):
        # Start the watched greenlet and re-arm the exception link on self.
        self.greenlet = Greenlet(self.func)
        self.greenlet.link_exception(self)
        self.greenlet.start()
        return self.greenlet

    def respawn(self):
        return self.spawn()
def _spawn_with_linktype_callback(link_func, callback, func, *args, **kwargs): if link_func not in ('link', 'link_exception', 'link_value'): raise Exception('link_func %s is not a valid link type' % link_func) g = Greenlet(func, *args, **kwargs) getattr(g, link_func)(callback) g.start() return g
def start(self):
    """Begin resource monitoring by launching ``self._run`` in a new greenlet."""
    log.debug("%r: starting resource monitoring %s", self._platform_id, self)
    self._active = True
    Greenlet(self._run).start()
def start_folder_watcher_task(self):
    """Every 5 minutes, run check_folder (in a greenlet unless disabled)."""
    while 1:
        if not self.no_greenlet:
            g = Greenlet(self.check_folder)
            g.start()
        else:
            self.check_folder()
        # Sleep between scans; 300s = 5 minutes.
        sleep(300)
class Publisher(object):
    """Scrapes data for a key in its own greenlet and fans results out to
    subscriber queues; new subscribers get a small cache of recent results."""

    # maps key -> Publisher
    pubs = {}

    @staticmethod
    def register(key, sub):
        # Get-or-create the publisher for this key and attach the subscriber.
        pub = Publisher.pubs.get(key, None)
        if pub is None:
            pub = Publisher(key, sub)
            Publisher.pubs[key] = pub
        else:
            pub.addsub(sub)
        sub.pubs.add(pub)

    def __init__(self, key, sub):
        self.key = key
        self.subs = set([sub])
        self.greenlet = Greenlet(Publisher.serve, self)
        self.greenlet.start()
        # ask for data from the last minute
        self.query_time = None  # datetime.now() - timedelta(minutes = 1)
        # last 20 results so that we have something to show to new subs
        # NOTE(review): maxlen is 10 although the comment says 20 — confirm.
        self.cache = deque(maxlen=10)

    def addsub(self, sub):
        # serve the subscriber the cached data
        data = simplejson.dumps(list(self.cache))
        sub.queue.put(data)
        self.subs.add(sub)

    def serve(self):
        # Main loop: scrape while there are subscribers, then unregister self.
        while self.subs:
            data = self.scrape()  # blocking call
            for s in self.subs:
                s.queue.put(data)
            sleep()  # yield to clients
        # remove self
        del Publisher.pubs[self.key]

    def scrape(self):
        ## Fetch the data for that self.key.
        while self.subs:
            print 'Pub(%s) @%s' % (self.key, self.query_time)
            data, next_time = DG.query(self.key[0], self.key[1],
                                       0.2,  # degrees!
                                       self.query_time)
            print 'Pub(%s) got %s @%s' % (self.key, len(data), next_time)
            if data:
                break
            sleep(1.0)  # wait for data for 100ms
        self.query_time = next_time
        # convert to primitive types for simplejson
        for d in data:
            d['time'] = str(d['time'])
        self.cache.extend(data)
        data = simplejson.dumps(data)
        return data
def random_delay_multivalue_consensus(N, t, inputs): mylog("[Tor] Making circuits...") # Now we don't use stem maxdelay = 0.01 buffers = map(lambda _: Queue(1), range(N)) # Instantiate the "broadcast" instruction def makeBroadcast(i): chans = [] # First establish N connections (including a self connection) for j in range(N): host, port = TOR_MAPPINGS[j] chans.append(connect_to_channel(host, port, i)) def _broadcast(v): mylog(bcolors.OKGREEN + "[%d] Broadcasted %s" % (i, repr(v)) + bcolors.ENDC) for j in range(N): chans[j].put((i, v)) return _broadcast # Create the servers servers = [] for i in range(N): _, port = TOR_MAPPINGS[i] servers.append(listen_to_channel(port)) gevent.sleep(2) print 'servers started' ts = [] for i in range(N): bc = makeBroadcast(i) recv = servers[i].get vi = inputs[i] th = Greenlet(mv84consensus, i, N, t, vi, bc, recv) th.start() # start_later(random.random() * maxdelay) ts.append(th) try: gevent.joinall(ts) except gevent.hub.LoopExit: # Manual fix for early stop agreed = "" for key, value in globalState.items(): if globalState[key] != "": agreed = globalState[key] for key, value in globalState.items(): if globalState[key] == "": globalState[key] = agreed if globalState[key] != agreed: print "Consensus Error" print globalState
def notify_me_if_gm_version_updated(self):
    """
    Sends a respond back to a worker indicating that the current global model
    is more recent than the model version indicated by the worker.
    """
    try:
        query_request = request.json
        # Validate presence/types of the required request fields.
        valid_failed = DCFServer.validate_input(
            query_request,
            [WORKER_ID_KEY, LAST_WORKER_MODEL_VERSION, SIGNED_PHRASE],
            [str, object, str]
        )
        if ERROR_MESSAGE_KEY in valid_failed:
            logger.error(valid_failed[ERROR_MESSAGE_KEY])
            return json.dumps({ERROR_MESSAGE_KEY: valid_failed[ERROR_MESSAGE_KEY]})
        worker_id = query_request[WORKER_ID_KEY]
        # Authentication/authorization checks before doing any work.
        if not self.worker_manager.is_worker_allowed(worker_id):
            logger.warning(f"Unknown worker {worker_id[0:WID_LEN]} tried to get the global model.")
            return INVALID_WORKER
        if not self.worker_manager.verify_challenge(worker_id, query_request[SIGNED_PHRASE]):
            logger.error(f"Failed to verify worker with id {worker_id[0:WID_LEN]}")
            return INVALID_WORKER
        if not self.worker_manager.is_worker_registered(worker_id):
            logger.warning(f"Unregistered worker {worker_id[0:WID_LEN]} tried to get the global model.")
            return UNREGISTERED_WORKER
        logger.info(f"Received request for global model version change notification from {worker_id[0:WID_LEN]}.")
        # in case a new request is made, terminate the old one
        if worker_id in self.model_version_req_dict and \
                len(self.model_version_req_dict[worker_id]) > 0:
            old_g, old_b = self.model_version_req_dict[worker_id].pop()
            msg = f"New request for global model version change notification received from {worker_id[0:WID_LEN]} - " \
                  "existing request terminated."
            logger.info(msg)
            # Flush the old long-poll body, then kill its greenlet.
            old_b.put(msg)
            old_b.put(StopIteration)
            old_g.kill()
            if len(self.model_version_req_dict[worker_id]) > 0:
                message_seriously_wrong(f"in 'return_global_model', "
                                        f"more than one entry in the 'mode_req_dict' for {worker_id[0:WID_LEN]}")
        # Long-poll: a greenlet streams a message into `body` once the
        # global model version changes; `body` is returned as the response.
        body = gevent.queue.Queue()
        g = Greenlet(self.check_model_version_updated, worker_id, body,
                     query_request[LAST_WORKER_MODEL_VERSION])
        self.gevent_pool.add(g)
        if worker_id not in self.model_version_req_dict:
            self.model_version_req_dict[worker_id] = []
        self.model_version_req_dict[worker_id].append((g, body))
        g.start()
        return body
    except Exception as e:
        logger.warning(str(e.__class__) + str(e))
        return str(e)
def _start_publisher_greenlet(self):
    """Start the publisher greenlet; must not already be active."""
    assert self._publisher_active is False
    self._set_publisher_rate()
    self._publisher_active = True
    # Launch the publishing loop in its own greenlet.
    runnable = Greenlet(self._run_publisher)
    runnable.start()
    log.debug("%r: publisher greenlet started, dispatch rate=%s",
              self._platform_id, self._pub_rate)
def random_delay_multivalue_consensus(N, t, inputs): mylog("[Tor] Making circuits...") # Now we don't use stem maxdelay = 0.01 buffers = map(lambda _: Queue(1), range(N)) # Instantiate the "broadcast" instruction def makeBroadcast(i): chans = [] # First establish N connections (including a self connection) for j in range(N): host, port = TOR_MAPPINGS[j] chans.append(connect_to_channel(host, port, i)) def _broadcast(v): mylog(bcolors.OKGREEN + "[%d] Broadcasted %s" % (i, repr(v)) + bcolors.ENDC) for j in range(N): chans[j].put( (i,v) ) return _broadcast # Create the servers servers = [] for i in range(N): _, port = TOR_MAPPINGS[i] servers.append(listen_to_channel(port)) gevent.sleep(2) print 'servers started' ts = [] for i in range(N): bc = makeBroadcast(i) recv = servers[i].get vi = inputs[i] th = Greenlet(mv84consensus, i, N, t, vi, bc, recv) th.start() # start_later(random.random() * maxdelay) ts.append(th) try: gevent.joinall(ts) except gevent.hub.LoopExit: # Manual fix for early stop agreed = "" for key, value in globalState.items(): if globalState[key] != "": agreed = globalState[key] for key, value in globalState.items(): if globalState[key] == "": globalState[key] = agreed if globalState[key] != agreed: print "Consensus Error" print globalState
def start(self):
    """ Starts greenlet for resource monitoring. """
    # Guard the debug call to avoid building the string when not needed.
    if log.isEnabledFor(logging.DEBUG):
        log.debug("CIDEVSA-450 %r: starting resource monitoring %s",
                  self._platform_id, str(self))
    self._active = True
    # Launch the monitoring loop in its own greenlet.
    runnable = Greenlet(self._run)
    runnable.start()
def spawn_named(name: str, task: Callable, *args: Any, **kwargs: Any) -> Greenlet:
    """Create, name, and start a greenlet running ``task``; return it."""
    g = Greenlet(task, *args, **kwargs)
    g.name = name
    g.start()
    return g
def start(self):
    """Start the timer and main greenlets; block until the main one stops."""
    greenlet = Greenlet(self._start)
    greenlet.link_exception(self._logGreenletError)
    # NOTE(review): the timer uses the fully-qualified gevent.greenlet.Greenlet
    # while the main greenlet uses the imported name, and has no error link,
    # and is never joined — confirm this is intended.
    TIMER = gevent.greenlet.Greenlet(self._timer)
    TIMER.start()
    # Start and wait until the log server stops (main greenlet).
    greenlet.start()
    greenlet.join()
def listen_msg():
    """Subscribe to alarm-message channels and dispatch each pmessage to a
    handler greenlet."""
    logger.info(ConstLog.socketio + 'ListenMsgThreading Started')
    ps = db0.pubsub()
    # Pattern-subscribe to every alarm channel.
    ps.psubscribe(Channel.msg_alarm + '*')
    for item in ps.listen():
        if item is not None:
            logger.info('LISTEN MSG ' + str(item))
            if item['type'] == 'pmessage':
                # Handle each message concurrently in its own greenlet.
                thr = Greenlet(run=handle_msg, msg_key=item['data'].decode())
                thr.start()
def StartGame(self):
    """Create the game, wait until every player is ready, then run it."""
    self.game_id = self.manager.CreateGame(self.game_name, self.players)[0]
    # Send every player to the game table.
    for p in self.players:
        CardNamespace.players[p].emit('go_to_game_table')
    # Busy-wait (with short sleeps) until all players report ready.
    while False in [CardNamespace.players[p].ready for p in self.players]:
        print >>sys.stderr, [CardNamespace.players[p].ready for p in self.players]
        sleep(0.05)
    # Run the game in a greenlet and block until it finishes.
    g = Greenlet(self.manager.StartGame, self.game_id)
    g.start()
    g.join()
class BaseTestCase(TestCase):
    """Shared helpers for tests needing an ObjectoPlex server and client registry."""

    def start_server(self, host, port, linked_servers=[]):
        # NOTE(review): mutable default argument; shared across calls if mutated.
        result = ObjectoPlex((host, port),
                             middlewares=[
                                 PingPongMiddleware(),
                                 LegacySubscriptionMiddleware(),
                                 StatisticsMiddleware(),
                                 ChecksumMiddleware(),
                                 RoutingMiddleware(),
                             ],
                             linked_servers=linked_servers)
        # Stop the server cleanly on SIGTERM/SIGINT.
        gevent.signal(signal.SIGTERM, result.stop)
        gevent.signal(signal.SIGINT, result.stop)
        Greenlet.spawn(result.serve_forever)
        sleep(0.1)  # give the server a moment to come up
        return result

    def start_client_registry(self, host, port):
        self.service = ClientRegistry(host, port)
        self.service_greenlet = Greenlet(self.service.start)
        gevent.signal(signal.SIGTERM, self.service_greenlet.kill)
        gevent.signal(signal.SIGINT, self.service_greenlet.kill)
        self.service_greenlet.start()
        logger.info('Started client registry, connecting to %s:%s', host, port)

    def stop_client_registry(self):
        self.service.cleanup()
        self.service_greenlet.kill()
        # NOTE(review): _host/_port look like module-level globals — confirm.
        logger.info('Stopped client registry, connecting to %s:%s', _host, _port)

    def assertCorrectClientListReply(self, obj, payload):
        """Assert that `payload` lists this client (by routing id) with
        client/user metadata matching `obj`."""
        self.assertIn('clients', payload,
                      msg=u"attribute 'clients' not in payload")
        d = None
        for dct in payload['clients']:
            self.assertIn(
                'routing-id', dct,
                msg=u"attribute 'routing-id' not in list item in clients list")
            if dct['routing-id'] == self.routing_id:
                d = dct
                break
        self.assertIsNotNone(
            d, msg=u'Client not present in returned client listing')
        self.assertEquals(obj.metadata['client'], d['client'],
                          msg=u"attribute 'client' not equal")
        self.assertEquals(obj.metadata['user'], d['user'],
                          msg=u"attribute 'user' not equal")
def do_the_calculations(nworkers):
    """Spawn ``nworkers`` greenlets running ``runcmd`` (hosts assigned
    round-robin from ``hostnames``) and wait for all of them to finish."""
    workers = []
    for idx in range(nworkers):
        g = Greenlet(runcmd, hostnames[idx % len(hostnames)])
        g.start()
        workers.append(g)
    joinall(workers)
def __init__(self, k, t, nodeid, sk, pk, participantids, participantkeys,
             group, symflag, send_function, recv_function, sid=1,
             reconstruction=True, seed=None):
    """Set up an AVSS participant, start the reliable-broadcast greenlet,
    then block processing incoming messages until the protocol finishes.

    NOTE(review): this constructor runs the entire receive loop — it does
    not return until self.finished is set.
    """
    self.group = group
    self.send_function = send_function
    self.participantids = participantids
    self.participantkeys = participantkeys
    self.reconstruction = reconstruction
    self.nodeid = nodeid
    self.t = t
    self.k = k
    self.dealerid = k
    #maybe CRS would be a more fitting name since pk doesn't go with sk?
    self.pk = pk
    self.sk = sk
    # Shared key with the dealer (Diffie-Hellman-style exponentiation).
    self.sharedkey = participantkeys[self.dealerid]**sk
    # Protocol state flags and counters.
    self.rbfinished = False
    self.finished = False
    self.sendrecs = False
    self.sharevalid = False
    self.okcount = 0
    self.implicatecount = 0
    self.output = None
    self.secret = None
    self.pc = PolyCommitNP(t=t, pk=pk, group=group)
    self.shares = {}
    # One queue + receive closure per message type.
    self.queues = {}
    self.recvs = {}
    msgtypes = ["rb", "hbavss"]
    for msgtype in msgtypes:
        self.queues[msgtype] = Queue()
        self.recvs[msgtype] = self.makeRecv(msgtype)
    # Run reliable broadcast (dealer = k) concurrently.
    rb_thread = Greenlet(rbc_and_send, sid, nodeid, k + 1, t, k, None,
                         self.recvs["rb"], send_function)
    rb_thread.start()
    #send_function(nodeid, ["send", reliablebroadcast(sid, nodeid, k+1, f=t, leader=k, input=None, receive=self.recvs["rb"], send=send_function)])
    # Dispatch incoming messages until the protocol completes.
    while not self.finished:
        sender, msg = recv_function()
        self.receive_msg(sender, msg)
def net_runner(net_state, config, stratum_clients, server_state, celery,
               exit_event):
    """Start network and node monitor greenlets, then wait for shutdown."""
    logger.info("Network monitor starting up; Thread ID {}"
                .format(threading.current_thread()))
    network = Greenlet(monitor_network, stratum_clients, net_state, config,
                       server_state, celery)
    nodes = Greenlet(monitor_nodes, config, net_state)
    nodes.start()
    network.start()
    try:
        # Block until the process is asked to exit.
        exit_event.wait()
    finally:
        logger.info("Network monitor thread shutting down...")
def add_worker(config, task, number, worker_pool, processed_task_queue):
    """Create, register, and start a notification-worker greenlet for ``task``."""
    logger.info('Start worker#{number} for task id={task_id}.'.format(
        task_id=task.task_id, number=number
    ))
    greenlet = Greenlet(
        notification_worker,
        task,
        processed_task_queue,
        timeout=config.HTTP_CONNECTION_TIMEOUT,
        verify=False,
    )
    # Register with the pool before starting so the pool tracks it.
    worker_pool.add(greenlet)
    greenlet.start()
class MongoThread(object):
    """A thread, or a greenlet, that uses a MongoClient"""

    def __init__(self, test_case):
        # Mirror the test case's configuration and shared client.
        self.use_greenlets = test_case.use_greenlets
        self.client = test_case.c
        self.db = self.client[DB]
        self.ut = test_case
        self.passed = False  # set True only if run_mongo_thread completes

    def start(self):
        if self.use_greenlets:
            # A Gevent extended Greenlet
            self.thread = Greenlet(self.run)
        else:
            self.thread = threading.Thread(target=self.run)
            self.thread.setDaemon(True)  # Don't hang whole test if thread hangs
        self.thread.start()

    @property
    def alive(self):
        # Unified liveness check across the greenlet/thread implementations.
        if self.use_greenlets:
            return not self.thread.dead
        else:
            return self.thread.isAlive()

    def join(self):
        self.thread.join(20)
        if self.use_greenlets:
            msg = "Greenlet timeout"
        else:
            msg = "Thread timeout"
        assert not self.alive, msg
        self.thread = None

    def run(self):
        self.run_mongo_thread()
        # No exceptions thrown
        self.passed = True

    def run_mongo_thread(self):
        # Subclasses implement the actual workload.
        raise NotImplementedError()

    def disconnect_client(self):
        if isinstance(self.client, MongoClient):
            self.client.close()
        else:
            # Don't kill the replica set monitor.
            self.client.disconnect()
def test_socket_reclamation(self):
    # Check that if a thread starts a request and dies without ending
    # the request, that the socket is reclaimed into the pool.
    cx_pool = self.get_pool(
        pair=(host, port),
        max_size=10,
        net_timeout=1000,
        conn_timeout=1000,
        use_ssl=False,
    )
    self.assertEqual(0, len(cx_pool.sockets))

    lock = None
    the_sock = [None]  # mutable cell: Python 2 has no `nonlocal`

    def leak_request():
        # Start a request and grab a socket, but never end the request.
        self.assertEqual(NO_REQUEST, cx_pool._get_request_state())
        cx_pool.start_request()
        self.assertEqual(NO_SOCKET_YET, cx_pool._get_request_state())
        sock_info = cx_pool.get_socket()
        self.assertEqual(sock_info, cx_pool._get_request_state())
        the_sock[0] = id(sock_info.sock)
        if not self.use_greenlets:
            lock.release()

    if self.use_greenlets:
        g = Greenlet(leak_request)
        g.start()
        g.join(1)
        self.assertTrue(g.ready(), "Greenlet is hung")
    else:
        lock = thread.allocate_lock()
        lock.acquire()
        # Start a thread WITHOUT a threading.Thread - important to test that
        # Pool can deal with primitive threads.
        thread.start_new_thread(leak_request, ())
        # Join thread
        acquired = lock.acquire()
        self.assertTrue(acquired, "Thread is hung")

    force_reclaim_sockets(cx_pool, 1)
    # Pool reclaimed the socket
    self.assertEqual(1, len(cx_pool.sockets))
    self.assertEqual(the_sock[0], id(one(cx_pool.sockets).sock))
# NOTE(review): the connection string and surrounding code below appear
# redacted/garbled ("tcp://*****:*****@type'...") — this block is not
# syntactically valid as-is and cannot be reconstructed without the original
# source; left byte-identical pending recovery.
def _run(self): ''' TODO: add exception handling ''' self.context = zmq.Context() def _server(): print("MsgProcessor: server running") socket = self.context.socket(zmq.PAIR) socket.bind("tcp://127.0.0.1:%s" % self.serv_conn[1]) print("MsgProcessor: server -> bound") return socket def _client(): print("MsgProcessor: client running") socket = self.context.socket(zmq.PAIR) socket.connect("tcp://*****:*****@type': 'connect_msg', 'host': '127.0.0.1', 'port': 30303 })) self.server_t = Greenlet(self._client) self.client_t = Greenlet(self._server) self.server_t.start() self.client_t.start()
class WebSocketClient(WebSocketBaseClient):
    """ws4py client that pumps received messages into a queue via a greenlet."""

    def __init__(self, url, protocols=None, extensions=None):
        WebSocketBaseClient.__init__(self, url, protocols, extensions)
        self._th = Greenlet(self.run)  # read loop; started once handshake succeeds
        self.messages = Queue()        # inbound message buffer

    def handshake_ok(self):
        # Handshake complete: start pumping frames.
        self._th.start()

    def received_message(self, message):
        # Copy because ws4py may reuse the message object.
        self.messages.put(copy.deepcopy(message))

    def receive(self):
        # Blocks until a message is available.
        return self.messages.get()
def check_stream(client, namespace, stream, start, end, limit, timeout, latency):
    """Drain ``stream`` in a greenlet; return True if the read finishes
    cleanly within ``timeout``, False if it hangs (killed) or raises."""
    def _drain():
        # Consume and discard every event; we only care that the read completes.
        for _ in client.get(stream, start, end, limit=limit, timeout=latency):
            pass

    g = Greenlet(_drain)
    g.start()
    g.join(timeout)
    if g.ready():
        return g.successful()
    # Still running after the deadline: kill it and report failure.
    g.kill()
    return False
def test_reply_reconcile(db, config, message, sync_client):
    """End-to-end: send a reply, sync, and verify the spooled copy is
    reconciled with the synced message."""
    from inbox.server.models.tables.base import Message, SpoolMessage
    from inbox.server.models.tables.imap import ImapAccount
    from inbox.server.sendmail.base import reply, recipients

    to, subject, body = message
    attachment = None
    cc = '*****@*****.**'
    bcc = None
    account = db.session.query(ImapAccount).get(ACCOUNT_ID)

    # Create email message, store a local copy + send it:
    reply(NAMESPACE_ID, account, THREAD_ID, recipients(to, cc, bcc), subject,
          body, attachment)

    # Sync to verify reconciliation:
    synclet = Greenlet(sync_client.start_sync, ACCOUNT_ID)
    synclet.start()
    print '\nSyncing...'
    # Bounded wait so a broken sync doesn't hang the test forever.
    Greenlet.join(synclet, timeout=60)
    sync_client.stop_sync(ACCOUNT_ID)

    spool_messages = db.session.query(SpoolMessage).\
        filter_by(subject=THREAD_TOPIC).all()
    assert len(spool_messages) == 1, 'spool message missing'

    resolved_message_id = spool_messages[0].resolved_message_id
    assert resolved_message_id, 'spool message not reconciled'

    inbox_uid = spool_messages[0].inbox_uid
    thread_id = spool_messages[0].thread_id
    g_thrid = spool_messages[0].g_thrid
    killall(synclet)

    # The reconciled message must carry the same identifiers as the spool copy.
    reconciled_message = db.session.query(Message).get(resolved_message_id)
    assert reconciled_message.inbox_uid == inbox_uid,\
        'spool message, reconciled message have different inbox_uids'
    assert reconciled_message.thread_id == thread_id,\
        'spool message, reconciled message have different thread_ids'
    assert reconciled_message.g_thrid == g_thrid,\
        'spool message, reconciled message have different g_thrids'
def random_delay_broadcast_tor(inputs, t): N = len(inputs) # Instantiate the "broadcast" instruction def makeBroadcast(i): chans = [] # First establish N connections (including a self connection) for j in range(N): host, port = TOR_MAPPINGS[j] chans.append(connect_to_channel(host, port)) def _broadcast(v): for j in range(N): chans[j].put((i, v)) return _broadcast # Get the servers ready def makeOutput(i): def _output(v): print '[%d]' % i, 'output:', v return _output # Create the servers servers = [] for i in range(N): _, port = TOR_MAPPINGS[i] servers.append(listen_to_channel(port)) gevent.sleep(2) print 'servers started' ts = [] for i in range(N): bc = makeBroadcast(i) recv = servers[i].get outp = makeOutput(i) inp = bv_broadcast(i, N, t, bc, recv, outp) th = Greenlet(inp, inputs[i]) th.start() ts.append(th) try: gevent.joinall(ts) except gevent.hub.LoopExit: pass
class Periodic(object):
    """Runs `f(*args, **kwargs)` every `interval` seconds in a background
    greenlet until stopped."""

    def __init__(self, interval, f, *args, **kwargs):
        self.interval = interval
        self.f = f
        self.args = args
        self.kwargs = kwargs
        self._greenlet = None

    def _run(self):
        # Fire-and-forget each invocation so a slow `f` cannot delay the
        # schedule, then sleep one period.
        while True:
            spawn_raw(self.f, *self.args, **self.kwargs)
            sleep(self.interval)

    def _discard_greenlet(self, val):
        # Link callback: forget the greenlet once it finishes or dies.
        self._greenlet = None

    @property
    def started(self):
        # NOTE(review): relies on greenlet truthiness (gevent greenlets are
        # falsy before they begin and after they die), not merely on `None`.
        return bool(self._greenlet)

    def start(self, right_away=True):
        if self._greenlet:
            raise RuntimeError("Periodic already started.")
        self._greenlet = Greenlet(self._run)
        self._greenlet.link(self._discard_greenlet)
        if right_away:
            self._greenlet.start()
        else:
            # Delay the very first run by one full interval.
            self._greenlet.start_later(self.interval)

    def stop(self, block=True, timeout=None):
        if not self._greenlet:
            raise RuntimeError("Periodic is not started")
        self._greenlet.kill(block=block, timeout=timeout)
        self._greenlet = None

    def __repr__(self):
        state = "running" if self.started else "stopped"
        return "<Periodic[%.2f seconds, %s] %r(*%r, **%r)>" % (
            self.interval, state, self.f, self.args, self.kwargs,
        )
def setup_xaal():
    """Set up the xAAL Engine & Device and start the loop in a Greenlet.

    Also installs the module-level `monitor` used elsewhere.
    """
    global monitor
    engine = Engine()
    config = tools.load_cfg_or_die(PACKAGE_NAME)
    device = Device("hmi.basic")
    device.address = config['config']['addr']
    device.vendor_id = "IHSEV"
    device.product_id = "REST API"
    device.version = 0.1
    device.info = "%s@%s" % (PACKAGE_NAME, platform.node())
    engine.add_device(device)
    monitor = Monitor(device, filter_func=monitor_filter)
    engine.start()
    # Run the engine loop without blocking the caller.
    loop_greenlet = Greenlet(xaal_loop, engine)
    loop_greenlet.start()
class MongoThread(object):
    """A thread, or a greenlet, that uses a MongoClient"""

    def __init__(self, test_case):
        self.use_greenlets = test_case.use_greenlets
        self.client = test_case.c
        self.db = self.client[DB]
        self.ut = test_case
        self.passed = False

    def start(self):
        if self.use_greenlets:
            # A Gevent extended Greenlet
            self.thread = Greenlet(self.run)
        else:
            worker = threading.Thread(target=self.run)
            # Don't hang whole test if thread hangs
            worker.setDaemon(True)
            self.thread = worker
        self.thread.start()

    @property
    def alive(self):
        if self.use_greenlets:
            return not self.thread.dead
        return self.thread.isAlive()

    def join(self):
        self.thread.join(20)
        msg = "Greenlet timeout" if self.use_greenlets else "Thread timeout"
        assert not self.alive, msg
        self.thread = None

    def run(self):
        self.run_mongo_thread()
        # No exceptions thrown
        self.passed = True

    def run_mongo_thread(self):
        # Subclasses implement the actual workload.
        raise NotImplementedError()
def _schedule_new_greenlet(self, func: Callable, *args: Any, in_seconds_from_now: int = None, **kwargs: Any) -> Greenlet:
    """Spawn a tracked sub-task; an exception in it propagates to
    self.on_error so it crashes self/the main greenlet.

    NOTE(review): `in_seconds_from_now` is effectively Optional[int];
    annotation kept as-is since the file's typing imports are not visible.
    """
    def _forget(completed: Greenlet) -> None:
        # Drop the finished greenlet from the tracking list, if still there.
        if completed in self.greenlets:
            self.greenlets.remove(completed)

    task = Greenlet(func, *args, **kwargs)
    task.link_exception(self.on_error)
    task.link_value(_forget)
    self.greenlets.append(task)
    if in_seconds_from_now:
        task.start_later(in_seconds_from_now)
    else:
        task.start()
    return task
class MemoryLogger:
    """Logs a guppy heap report every `interval` seconds until stopped."""

    def __init__(self, interval: float) -> None:
        self._interval = interval
        self._stop = Event()
        self._greenlet = Greenlet(self._run)
        self._greenlet.name = "MemoryLogger"

    def start(self) -> Greenlet:
        self._greenlet.start()
        return self._greenlet

    def stop(self):
        # Wakes the _run loop; the greenlet then exits on its own.
        self._stop.set()

    def _run(self) -> None:
        # The stop event's wait() doubles as the inter-report sleep, so
        # stop() takes effect without waiting out a full interval.
        while not self._stop.is_set():
            snapshot = guppy.hpy().heap()
            log.debug("Memory report", size=snapshot.domisize, objects=snapshot.count)
            self._stop.wait(self._interval)