def msg_received(self, msg):
    # TODO: this will require reimplementing messages in some kind of
    # FSM DSL, ideally programming lang agnostic
    if msg.id == 1:
        if self.state is not SS.NEW_CLIENT:
            raise RuntimeError
        self.name = msg.name
        self.transport.send(factory(2))
        self.state = SS.GAME_CHALLENGE
    elif self.state is SS.GAME_CHALLENGE:
        if msg.id == 4:
            self.sock.shutdown(2)
        elif msg.id == 5:
            self.world = World(20, 20)
            self.player = self.world.spawn(Player, owner=self)
            # spawn the ticker method itself; calling it here would block
            Greenlet.spawn(self.world.ticker)
            msg = factory(6)
            msg.shape = self.world.shape
            msg.size = self.world.size
            self.transport.send(msg)
            self.state = SS.GAME_IN_PROGRESS
        else:
            raise RuntimeError
    elif self.state is SS.GAME_IN_PROGRESS:
        if msg.id == 8:
            args = [int(x) for x in msg.coord.split(" ")]
            self.player.move(Coord(*args))
        else:
            raise RuntimeError

def __init__(self, log, bindport, node_id):
    Greenlet.__init__(self)
    self.log = log
    self.dstaddr = '0.0.0.0'
    self.bindport = bindport
    self.node_id = node_id
    self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    self.sock.bind(('', bindport))
    self.last_sent = 0
    self.dht_cache = LRU.LRU(100000)
    self.dht_router = DHTRouter(self)
    self.taskman = DHTTaskManager()

    self.log.write(self.dstaddr + ':' + str(self.bindport) + " open DHT")

    # add ourselves to the router
    node_msgobj = codec_pb2.MsgDHTNode()
    node_msgobj.node_id = node_id
    node_msgobj.ip = socket.inet_pton(socket.AF_INET, "127.0.0.1")
    node_msgobj.port = bindport
    rc = self.dht_router.add_node(node_msgobj)
    if rc < 0:
        self.log.write("DHT: failed to add node %s %d" % ("127.0.0.1", bindport))

def random_delay_broadcast1(inputs, t):
    maxdelay = 0.01
    N = len(inputs)
    buffers = map(lambda _: Queue(1), inputs)

    # Instantiate the "broadcast" instruction
    def makeBroadcast(i):
        def _broadcast(v):
            def _deliver(j):
                buffers[j].put((i, v))
            for j in range(N):
                Greenlet(_deliver, j).start_later(random.random() * maxdelay)
        return _broadcast

    def makeOutput(i):
        def _output(v):
            print '[%d]' % i, 'output:', v
        return _output

    ts = []
    for i in range(N):
        bc = makeBroadcast(i)
        recv = buffers[i].get
        outp = makeOutput(i)
        inp = bv_broadcast(i, N, t, bc, recv, outp)
        th = Greenlet(inp, inputs[i])
        th.start_later(random.random() * maxdelay)
        ts.append(th)

    try:
        gevent.joinall(ts)
    except gevent.hub.LoopExit:
        pass

def __init__(self, sock, addr):
    Endpoint.__init__(self, sock, addr)
    Greenlet.__init__(self)
    self.observers = BatchList()
    self.init_gamedata_mixin()
    self.gdhistory = []
    self.usergdhistory = []

def __init__(self, max_sessions, clear_sessions=False, delay_seconds=30):
    assert delay_seconds > 1
    Greenlet.__init__(self)
    self.db_session = database_setup.get_session()
    # pending sessions will be converted to attacks if we cannot match
    # them with bait traffic within this period
    self.delay_seconds = delay_seconds

    # clear all pending sessions on startup
    pending_classification = self.db_session.query(Classification).filter(Classification.type == 'pending').one()
    pending_deleted = self.db_session.query(Session).filter(
        Session.classification == pending_classification).delete()
    self.db_session.commit()
    logging.info('Cleaned {0} pending sessions on startup'.format(pending_deleted))
    self.do_classify = False

    if clear_sessions or max_sessions == 0:
        count = self.db_session.query(Session).delete()
        logging.info('Deleting {0} sessions on startup.'.format(count))
        self.db_session.commit()

    self.max_session_count = max_sessions
    if max_sessions:
        logger.info('Database has been limited to contain {0} sessions.'.format(max_sessions))

    context = beeswarm.shared.zmq_context
    self.subscriber_sessions = context.socket(zmq.SUB)
    self.subscriber_sessions.connect(SocketNames.RAW_SESSIONS)
    self.subscriber_sessions.setsockopt(zmq.SUBSCRIBE, Messages.SESSION_CLIENT)
    self.subscriber_sessions.setsockopt(zmq.SUBSCRIBE, Messages.SESSION_HONEYPOT)

    self.processedSessionsPublisher = context.socket(zmq.PUB)
    self.processedSessionsPublisher.bind(SocketNames.PROCESSED_SESSIONS)
    self.config_actor_socket = context.socket(zmq.REQ)
    self.config_actor_socket.connect(SocketNames.CONFIG_COMMANDS)

def __init__(self, server, jobmanager, **config):
    Greenlet.__init__(self)
    self._set_config(**config)
    self.jobmanager = jobmanager
    self.server = server
    self.reporter = server.reporter
    self.logger = server.register_logger('auxmonitor_{}'.format(self.config['name']))
    self.block_stats = dict(accepts=0,
                            rejects=0,
                            solves=0,
                            last_solve_height=None,
                            last_solve_time=None,
                            last_solve_worker=None)
    self.current_net = dict(difficulty=None, height=None)
    self.recent_blocks = deque(maxlen=15)

    self.prefix = self.config['name'] + "_"
    # create an instance local one_min_stats for use in the def status func
    self.one_min_stats = [self.prefix + key for key in self.one_min_stats]
    self.server.register_stat_counters(self.one_min_stats)

    self.coinservs = self.config['coinservs']
    self.coinserv = bitcoinrpc.AuthServiceProxy(
        "http://{0}:{1}@{2}:{3}/"
        .format(self.coinservs[0]['username'],
                self.coinservs[0]['password'],
                self.coinservs[0]['address'],
                self.coinservs[0]['port']),
        pool_kwargs=dict(maxsize=self.coinservs[0].get('maxsize', 10)))
    self.coinserv.config = self.coinservs[0]

    if self.config['signal']:
        gevent.signal(self.config['signal'], self.update, reason="Signal received")

def __init__(self, poll_interval=30, chunk_size=100):
    self.poll_interval = poll_interval
    self.chunk_size = chunk_size
    self.transaction_pointer = None
    self.log = log.new(component='search-index')
    Greenlet.__init__(self)

def __init__(self, max_sessions, clear_sessions=False, delay_seconds=30):
    assert delay_seconds > 1
    Greenlet.__init__(self)
    db_session = database_setup.get_session()
    self.enabled = True
    # pending sessions will be converted to attacks if we cannot match
    # them with bait traffic within this period
    self.delay_seconds = delay_seconds

    # clear all pending sessions on startup
    pending_classification = db_session.query(Classification).filter(Classification.type == 'pending').one()
    pending_deleted = db_session.query(Session).filter(
        Session.classification == pending_classification).delete()
    db_session.commit()
    logging.info('Cleaned {0} pending sessions on startup'.format(pending_deleted))
    self.do_classify = False
    self.do_maintenance = False

    if clear_sessions or max_sessions == 0:
        db_session = database_setup.get_session()
        count = db_session.query(Session).delete()
        logging.info('Deleting {0} sessions on startup.'.format(count))
        db_session.commit()

    self.max_session_count = max_sessions
    if max_sessions:
        logger.info('Database has been limited to contain {0} sessions.'.format(max_sessions))

    context = beeswarm.shared.zmq_context

    # prepare sockets
    self.drone_data_socket = context.socket(zmq.SUB)
    self.processedSessionsPublisher = context.socket(zmq.PUB)
    self.databaseRequests = context.socket(zmq.REP)
    self.config_actor_socket = context.socket(zmq.REQ)
    self.drone_command_receiver = context.socket(zmq.PUSH)

def start_worker_with_task(config, processed_task_queue, task, worker_pool):
    """
    Create a worker to execute the task.

    :param config: configuration
    :type config: Config
    :param processed_task_queue:
    :type processed_task_queue: gevent_queue.Queue
    :param task:
    :type task: Task
    :param worker_pool:
    :type worker_pool: gevent.pool.Pool
    """
    worker = Greenlet(
        notification_worker,
        task,
        processed_task_queue,
        timeout=config.HTTP_CONNECTION_TIMEOUT,
        verify=False
    )
    worker_pool.add(worker)
    worker.start()

def __init__(self, fetcher_url_queue, process_html_queue, start_url, max_depth, url_list):
    Greenlet.__init__(self)
    self.fetcher_url_queue = fetcher_url_queue
    self.process_html_queue = process_html_queue
    self.start_url = start_url
    self.max_depth = max_depth
    self.url_list = url_list

def run(self):
    dispatcher = downloader.init()
    stopper = Greenlet(downloader.stop)
    stopper.start_later(5)
    dispatcher.join()

def __init__(self, gc, threadList, processor=""):
    Greenlet.__init__(self)
    self.gc = gc
    self.opList = gc.opList
    self.threadList = threadList
    self.processor = processor
    self.sleeptimes = 600

class MongoThread(object):
    """A thread, or a greenlet, that uses a Connection"""

    def __init__(self, test_case):
        self.use_greenlets = test_case.use_greenlets
        self.connection = test_case.c
        self.db = self.connection[DB]
        self.ut = test_case
        self.passed = False

    def start(self):
        if self.use_greenlets:
            self.thread = Greenlet(self.run)
        else:
            self.thread = threading.Thread(target=self.run)
        self.thread.start()

    def join(self):
        self.thread.join()
        self.thread = None

    def run(self):
        self.run_mongo_thread()
        # No exceptions thrown
        self.passed = True

    def run_mongo_thread(self):
        raise NotImplementedError()

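# A minimal usage sketch for the MongoThread base class above. The subclass
# name and the 'counters' collection are hypothetical, not from the original
# test suite: subclasses only override run_mongo_thread(), and start()/join()
# transparently pick greenlets or OS threads based on the test case.
class IncrementThread(MongoThread):
    def run_mongo_thread(self):
        # Bump a counter document once per run via the shared test connection.
        self.db.counters.update({'_id': 1}, {'$inc': {'n': 1}}, upsert=True)

# workers = [IncrementThread(test_case) for _ in range(10)]
# for w in workers:
#     w.start()
# for w in workers:
#     w.join()
# assert all(w.passed for w in workers)
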
def __init__(self, name):
    Greenlet.__init__(self)
    self.name = name
    self.inbox = Queue()
    self.outbox = Queue()
    self.proceed = Event()
    self.proceed.set()

def __init__(self, poll_interval=30, chunk_size=DOC_UPLOAD_CHUNK_SIZE):
    self.poll_interval = poll_interval
    self.chunk_size = chunk_size
    self.transaction_pointers = {}
    self.log = log.new(component='contact-search-index')
    Greenlet.__init__(self)

def __init__(self, log, peermgr, sock=None, dstaddr=None, dstport=None):
    Greenlet.__init__(self)
    self.log = log
    self.peermgr = peermgr
    self.dstaddr = dstaddr
    self.dstport = dstport
    self.recvbuf = ""
    self.ver_send = MIN_PROTO_VERSION
    self.last_sent = 0

    if sock is None:
        self.log.write("connecting to " + self.dstaddr)
        self.outbound = True
        try:
            self.sock = gevent.socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.sock.connect((dstaddr, dstport))
        except:
            self.handle_close()

        # immediately send message
        vt = self.version_msg()
        self.send_message("version", vt)
    else:
        self.sock = sock
        self.outbound = False
        if self.dstaddr is None:
            self.dstaddr = '0.0.0.0'
        if self.dstport is None:
            self.dstport = 0
        self.log.write(self.dstaddr + " connected")

def __init__(self, bot):
    # Make the thread stoppable
    Greenlet.__init__(self)
    self._stop = gevent.event.Event()

    self.bot = bot       # Handle to PerusBot
    self.repeats = {}    # Explained in docstring
    self.calls = {}      # Explained in docstring
    self.lastrepeat = 0  # Time when last repeating tasks were done

    # Add own logger to Timer.
    self.log = logger.Logger('Timer', bot.sets['timer_logfile'])

    # USE DATABASE.
    # Values to create the table with.
    table_values = 'id INTEGER PRIMARY KEY,time INTEGER,'
    table_values += 'receiver TEXT,msg TEXT'
    # Basically, id is null and each ? is replaced with the given value
    # when insert_data() is called.
    ins_params = '(null, ?, ?, ?)'
    db_file = self.bot.sets['timer_db_file']  # Database filename
    table = 'jobs'
    self.db = database.DbHandler(self.bot, db_file, table, table_values,
                                 ins_params)

def ws(self):
    # open websocket
    websock = request.environ["ws4py.websocket"]
    # websock_version = request.environ['wsgi.websocket_version']
    # sec_websocket_extensions = request.environ.get('HTTP_SEC_WEBSOCKET_EXTENSIONS')
    # sec_websocket_key = request.environ.get('HTTP_SEC_WEBSOCKET_KEY')
    # sec_websocket_version = request.environ.get('HTTP_SEC_WEBSOCKET_VERSION')
    endpoint = websock.sock.getpeername()
    # print 'connection established with endpoint %s:%s, version %s / %s, key %s, extensions %s' \
    #     % (endpoint[0], endpoint[1], websock_version, sec_websocket_version, sec_websocket_key, sec_websocket_extensions)

    from gevent import Greenlet
    g1 = Greenlet(websock.run)
    g1.start()

    websock.send("Hello dear Browser! I'll send you redis stuff when I get some")

    g2 = Greenlet(send_stuff_in_intervals, websock)
    # g2 = Greenlet(send_redis_stuff, websock)
    g2.start()

    g2.join()
    g1.join()
    print "connection closed to %s:%s" % endpoint

def reset_normalized(self):
    """Deletes all normalized data from the datastore."""
    logger.info("Initiating database reset - all normalized data will be deleted. (Starting timer)")
    start = time.time()
    for collection in self.db.collection_names():
        if collection not in ["system.indexes", "hpfeed", "hpfeeds"]:
            logger.warning("Dropping collection: {0}.".format(collection))
            self.db.drop_collection(collection)
    logger.info("All collections dropped. (Elapsed: {0})".format(time.time() - start))

    logger.info("Dropping indexes before bulk operation.")
    self.db.hpfeed.drop_indexes()
    logger.info("Indexes dropped (Elapsed: {0}).".format(time.time() - start))

    logger.info("Resetting normalization flags on the hpfeeds collection.")
    self.db.hpfeed.update(
        {},
        {"$set": {"normalized": False},
         "$unset": {"last_error": 1, "last_error_timestamp": 1}},
        multi=True
    )
    logger.info("Done resetting normalization flags on the hpfeeds collection (Elapsed: {0}).".format(time.time() - start))

    logger.info("Recreating indexes.")
    self.ensure_index()
    logger.info("Done recreating indexes (Elapsed: {0})".format(time.time() - start))

    logger.info("Full reset done in {0} seconds".format(time.time() - start))

    # This is a one-off job to generate stats for hpfeeds, which takes a while.
    Greenlet.spawn(self.rg.do_legacy_hpfeeds)

def __init__(self, node, peersocket, address, port=None):
    # for an incoming connection, port = None
    # for an outgoing connection, socket = None
    Greenlet.__init__(self)
    self.node = node
    self.socket = peersocket
    self.dstaddr = address
    self.dstport = port
    self.recvbuf = ""
    self.last_sent = 0
    self.getblocks_ok = True
    self.last_block_rx = time.time()
    self.last_getblocks = 0
    self.hash_continue = None
    self.log = log
    self.ver_recv = MIN_PROTO_VERSION
    self.remote_height = -1

    if self.socket:
        self.direction = "INCOMING"
        print("incoming connection")
    else:
        self.socket = gevent.socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.direction = "OUTGOING"
        print("outgoing connection!")
        print("connecting")
        try:
            self.socket.connect((self.dstaddr, self.dstport))
        except Exception, err:
            print "Exception: ", Exception, err
            print "Unable to establish connection"
            self.handle_close()
        self.sendVersionMessage()

def __init__(self, sock):
    Greenlet.__init__(self)
    self.sock = sock
    self.recvq = Queue()
    self.sendq = Queue()
    self.send = self.sendq.put
    self.recv = self.recvq.get

def __init__(self, run=None, *args, **kwargs):
    Greenlet.__init__(self)
    self.isStart = False
    self.inbox = queue.Queue()
    context = zmq.Context()
    self.sock = context.socket(zmq.PUB)
    self.to_db_address = (ToDBAddress().m2db_host, ToDBAddress().m2db_port)

def produce(self, session, time):
    goods = []
    if not self.type.from_good:
        return self.time_produce(time)
    for type in self.type.from_good:
        for good in self.owner.goods:
            if type == good.type:
                goods.append(good)
                break
    if not goods:
        return False
    quantity = min(goods, key=lambda g: g.quantity).quantity
    quantity += self.quantitylevel
    for good in goods:
        session.delete(good)
    session.commit()
    self.producing = True
    gl = Greenlet(self.create_product, session, quantity)
    gl.start_later(quantity * time)
    return True

def __init__(self, sock, data_listener, outfile=None, prefix_state=False):
    """
    @param sock The connection to read in characters from the instrument.
    @param data_listener data_listener(sample) is called whenever a new
                         data line is received, where sample is a dict
                         indexed by the names in trhph.CHANNEL_NAMES.
    @param outfile name of output file for all received data; None by default.
    @param prefix_state True to prefix each line in the outfile with the
                        current state; False by default.
    """
    # Thread.__init__(self, name="_Recv")
    Greenlet.__init__(self)
    self._sock = sock
    self._data_listener = data_listener
    self._last_line = ''
    self._new_line = ''
    self._lines = []
    self._active = True
    self._outfile = outfile
    self._prefix_state = prefix_state
    self._state = None
    # self.setDaemon(True)

    self._last_data_burst = None
    self._diagnostic_data = []
    self._system_info = {}
    self._power_statuses = {}

    log.debug("_Recv created.")

class WebSocketClient(WebSocketBaseClient):
    def __init__(self, url, protocols=None, extensions=None):
        WebSocketBaseClient.__init__(self, url, protocols, extensions)
        self._th = Greenlet(self.run)
        self.messages = Queue()

    def handshake_ok(self):
        self._th.start()

    def received_message(self, message):
        self.messages.put(copy.deepcopy(message))

    def closed(self, code, reason=None):
        # When the connection is closed, put a StopIteration
        # on the message queue to signal there's nothing left
        # to wait for
        self.messages.put(StopIteration)

    def receive(self):
        # If the websocket was terminated and there are no messages
        # left in the queue, return None immediately otherwise the client
        # will block forever
        if self.terminated and self.messages.empty():
            return None
        message = self.messages.get()
        if message is StopIteration:
            return None
        return message

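# A hedged usage sketch for the WebSocketClient above (the URL is
# illustrative): receive() returns None once the socket terminates, so a
# reader can simply loop until then while the background Greenlet pumps
# incoming frames onto the queue.
client = WebSocketClient('ws://localhost:9000/echo')
client.connect()
client.send('ping')
while True:
    message = client.receive()
    if message is None:
        break
    print(message)
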
def build_receiver(cls, sock, bufsize=4096, ooi_digi=False, data_listener=None,
                   outfile=None, prefix_state=True):
    """
    Creates and returns a receiver object that handles all received responses
    from the connection, keeping relevant information and a state.

    @param sock To read in from the instrument, sock.recv(bufsize)
    @param bufsize To read in from the instrument, sock.recv(bufsize)
    @param ooi_digi True to indicate the connection is with an OOI Digi;
                    False to indicate the connection is with an actual
                    ADCP unit. By default, False.
    @param data_listener
    @param outfile
    @param prefix_state
    """
    receiver = _Receiver(sock, bufsize, ooi_digi, data_listener, outfile,
                         prefix_state)

    if cls._use_greenlet:
        from gevent import Greenlet
        runnable = Greenlet(receiver.run)
        log.info("Created Greenlet-based _Receiver")
    else:
        from threading import Thread
        runnable = Thread(target=receiver.run)
        runnable.setDaemon(True)
        log.info("Created Thread-based _Receiver")

    receiver._thr = runnable
    return receiver

def __init__(self, sock, addr):
    Endpoint.__init__(self, sock, addr)
    Greenlet.__init__(self)
    self.observers = BatchList()
    self.gamedata = Gamedata()
    self.cmd_listeners = defaultdict(WeakSet)
    self.current_game = None

def __init__(self, conn_pool):
    Greenlet.__init__(self)
    self.conn_pool = conn_pool
    self.connections = self.conn_pool.pool
    self.max_idle = self.conn_pool.max_idle
    self.eviction_delay = self.conn_pool.eviction_delay

def toBeScheduled():
    for i in iterList:
        bc = bcList[i]  # makeBroadcast(i)
        sd = sdList[i]
        recv = servers[0].get
        th = Greenlet(honestParty, i, N, t, controlChannels[i], bc, recv, sd, options.B)
        th.parent_args = (N, t)
        th.name = 'client_test_freenet.honestParty(%d)' % i
        controlChannels[i].put(('IncludeTransaction', transactionSet))
        th.start()
        mylog('Summoned party %i at time %f' % (i, time.time()), verboseLevel=-1)
        ts.append(th)

    try:
        gevent.joinall(ts)
    except ACSException:
        gevent.killall(ts)
    except finishTransactionLeap:  # Manually jump to this level
        print 'msgCounter', msgCounter
        print 'msgTypeCounter', msgTypeCounter
        # message id 0 (duplicated) for signatureCost
        logChannel.put(StopIteration)
        mylog("=====", verboseLevel=-1)
        for item in logChannel:
            mylog(item, verboseLevel=-1)
        mylog("=====", verboseLevel=-1)
    except gevent.hub.LoopExit:  # Manual fix for early stop
        while True:
            gevent.sleep(1)
            checkExceptionPerGreenlet()
    finally:
        print "Consensus Finished"

def __init__(self, account_id, folder_name, folder_id, email_address,
             provider_name, poll_frequency, syncmanager_lock,
             refresh_flags_max, retry_fail_classes):
    self.account_id = account_id
    self.folder_name = folder_name
    self.folder_id = folder_id
    self.poll_frequency = poll_frequency
    self.syncmanager_lock = syncmanager_lock
    self.refresh_flags_max = refresh_flags_max
    self.retry_fail_classes = retry_fail_classes
    self.state = None
    self.provider_name = provider_name

    with mailsync_session_scope() as db_session:
        account = db_session.query(Account).get(self.account_id)
        self.throttled = account.throttled
        self.namespace_id = account.namespace.id
        assert self.namespace_id is not None, "namespace_id is None"

    self.state_handlers = {
        'initial': self.initial_sync,
        'initial uidinvalid': self.resync_uids,
        'poll': self.poll,
        'poll uidinvalid': self.resync_uids,
        'finish': lambda self: 'finish',
    }

    Greenlet.__init__(self)

    self.sync_status = SyncStatus(self.account_id, self.folder_id)
    self.sync_status.publish(provider_name=self.provider_name,
                             folder_name=self.folder_name)

def _broadcast(v):
    def _deliver(j):
        mylog(bcolors.OKGREEN + "MSG: [%d] -> [%d]: %s" % (i, j, repr(v)) + bcolors.ENDC)
        buffers[j].put((i, v))
        mylog(bcolors.OKGREEN + "     [%d] -> [%d]: Finish" % (i, j) + bcolors.ENDC)
    for j in range(N):
        Greenlet(_deliver, j).start_later(random.random() * maxdelay)

def test_send_ping(server):
    websocket = WebSocket(server_url='ws://0.0.0.0:8001')

    with patch.object(websocket, 'handle_ping') as mock_handle:
        assert websocket.connected is False
        websocket.connect(upgrade=False)

        def connection_handler():
            while True:
                try:
                    message = websocket.receive()
                except Exception:
                    logger.exception('connection handler exploded')
                    raise
                if message:
                    logger.info('got message: %s', message)

        assert websocket.connected is True

        # the first bytes sent down the connection are the response bytes
        # to the TCP connection and upgrade. we receive in this thread
        # because it will block all execution
        Greenlet.spawn(connection_handler)
        gevent.sleep(0.01)  # enough for the upgrade to happen

        clients = server.clients
        assert len(clients) == 1
        client_handler = list(clients.values())[0]
        socket = client_handler.ws

        ping_frame = Ping()
        socket.send(ping_frame.frame)

        with gevent.Timeout(5):
            while mock_handle.call_count != 1:
                gevent.sleep(0.01)

        assert mock_handle.call_count == 1
        assert mock_handle.call_args == call(ping_frame=ANY)
        call_param = mock_handle.call_args[1]['ping_frame']
        assert isinstance(call_param, Ping)

def _schedule_new_greenlet(self, func: Callable, *args,
                           in_seconds_from_now: int = None, **kwargs) -> Greenlet:
    """Spawn a sub-task and ensure an error on it crashes self/main greenlet."""

    def on_success(greenlet):
        if greenlet in self.greenlets:
            self.greenlets.remove(greenlet)

    greenlet = Greenlet(func, *args, **kwargs)
    greenlet.link_exception(self.on_error)
    greenlet.link_value(on_success)
    self.greenlets.append(greenlet)
    if in_seconds_from_now:
        greenlet.start_later(in_seconds_from_now)
    else:
        greenlet.start()
    return greenlet

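# A minimal sketch (assumed shape, not the project's actual handler) of the
# on_error callback that _schedule_new_greenlet links above. It assumes the
# owning class subclasses Greenlet and has a `log`; link_exception passes the
# dead sub-task greenlet to the callback.
def on_error(self, failed_greenlet: Greenlet) -> None:
    self.log.error('sub-task died', exc_info=failed_greenlet.exception)
    # Kill the owning service greenlet with the same exception so the
    # failure surfaces instead of being dropped silently.
    self.kill(failed_greenlet.exception)
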
def test_simple_group():
    values = range(2)
    test_function = CountCalls(pass_parameter)
    greenlet = group(
        Greenlet(test_function, values[0]),
        (test_function, values[1]),
    )
    greenlet.start()

    assert values == greenlet.get()
    assert 2 == test_function.calls

def main(self):
    PluginManager.plugin_manager.log.debug("Plugin ZeroMetrics attach to main")
    self.initialization()
    if self.config.is_write_zite_data:
        self.write_zite_data_greenlet = Greenlet.spawn(self.zite_data_greenlet)
    super(ActionsPlugin, self).main()

def __init__(self, account_id, namespace_id, email_address, folder_id,
             folder_name, provider_name, poll_frequency=1, scope=None):
    self.account_id = account_id
    self.namespace_id = namespace_id
    self.provider_name = provider_name
    self.poll_frequency = poll_frequency
    self.scope = scope

    self.log = logger.new(account_id=account_id)
    self.shutdown = event.Event()
    self.heartbeat_status = HeartbeatStatusProxy(self.account_id, folder_id,
                                                 folder_name, email_address,
                                                 provider_name)
    Greenlet.__init__(self)

def test_simple_chain():
    value = object()
    test_function = CountCalls(pass_parameter)
    greenlet = chain(
        Greenlet(test_function, value),
        test_function,
    )
    greenlet.start()

    assert value == greenlet.get()
    assert 2 == test_function.calls

def server():
    s = WebSocketServer(
        ('0.0.0.0', 8001),
        Resource(OrderedDict([('/', TestApplication)]))
    )
    s.start()
    thread = Greenlet.spawn(s.serve_forever)
    yield s
    s.stop()
    thread.kill()

def __init__(self, account_id, folder_name, folder_id, email_address,
             provider_name, poll_frequency, syncmanager_lock,
             refresh_flags_max, retry_fail_classes):
    bind_context(self, 'foldersyncengine', account_id, folder_id)
    self.account_id = account_id
    self.folder_name = folder_name
    self.folder_id = folder_id
    self.poll_frequency = poll_frequency
    self.syncmanager_lock = syncmanager_lock
    self.refresh_flags_max = refresh_flags_max
    self.retry_fail_classes = retry_fail_classes
    self.state = None
    self.provider_name = provider_name
    self.is_initial_sync = False
    self.is_first_sync = False

    with mailsync_session_scope() as db_session:
        account = db_session.query(Account).get(self.account_id)
        self.throttled = account.throttled
        self.namespace_id = account.namespace.id
        assert self.namespace_id is not None, "namespace_id is None"
        folder = db_session.query(Folder).get(self.folder_id)
        if folder:
            self.is_initial_sync = folder.initial_sync_end is None
            self.is_first_sync = folder.initial_sync_start is None

    self.state_handlers = {
        'initial': self.initial_sync,
        'initial uidinvalid': self.resync_uids,
        'poll': self.poll,
        'poll uidinvalid': self.resync_uids,
        'finish': lambda self: 'finish',
    }

    Greenlet.__init__(self)

    self.heartbeat_status = HeartbeatStatusProxy(self.account_id,
                                                 self.folder_id,
                                                 self.folder_name,
                                                 email_address,
                                                 self.provider_name)

def _test_rbc1(N=4, f=1, leader=None, seed=None):
    # Test everything when runs are OK
    # if seed is not None: print 'SEED:', seed
    sid = 'sidA'
    rnd = random.Random(seed)
    router_seed = rnd.random()
    if leader is None:
        leader = rnd.randint(0, N - 1)
    sends, recvs = simple_router(N, seed=seed)
    threads = []
    leader_input = Queue(1)
    for i in range(N):
        input = leader_input.get if i == leader else None
        t = Greenlet(reliablebroadcast, sid, i, N, f, leader, input, recvs[i], sends[i])
        t.start()
        threads.append(t)

    m = "Hello! This is a test message."
    leader_input.put(m)
    gevent.joinall(threads)
    assert [t.value for t in threads] == [m] * N

def callInThreadWithCallback(self, onResult, func, *args, **kwargs):
    """Call a callable object in a separate greenlet and call onResult
    with the return value."""
    if self.open:
        def task():
            try:
                res = func(*args, **kwargs)
            except:
                onResult(False, failure.Failure())
            else:
                onResult(True, res)
        # task is a zero-argument closure; args/kwargs are already captured
        self.add(Greenlet.spawn_later(0, task))

def test_with_immutable():
    count = 3
    generator = generate_greenlets(count)
    extra_value = object()
    extra_function = CountCalls(pass_parameter)
    immutable_greenlet = Greenlet(extra_function, extra_value)
    immutable_greenlet.immutable = True
    greenlet = group(
        generator.greenlets[0],
        generator.greenlets[1],
        immutable_greenlet,
        generator.greenlets[2],
    )
    greenlet.start()

    assert generator.values[0:2] + [extra_value] + generator.values[2:3] == greenlet.get()
    assert count == generator.calls
    assert 1 == extra_function.calls

def setup():
    """Set up the xAAL Engine & Device, and start the engine in a Greenlet."""
    global monitor
    engine = Engine()
    cfg = tools.load_cfg_or_die(PACKAGE_NAME)
    dev = Device("hmi.basic")
    dev.address = cfg['config']['addr']
    dev.vendor_id = "IHSEV"
    dev.product_id = "WEB Interface"
    dev.version = 0.1
    dev.info = "%s@%s" % (PACKAGE_NAME, platform.node())
    engine.add_device(dev)
    monitor = Monitor(dev, filter_func=monitor_filter)
    monitor.subscribe(event_handler)
    engine.start()
    green_let = Greenlet(xaal_loop, engine)
    green_let.start()

def handle_endpoints(self, worker, uri_path, env, start_response, body):
    """UnitOfWork REST endpoints, delegated from the Worker"""
    if uri_path == '/shard/init':
        # initialize the shard
        Greenlet(self.shard_init, worker, env, start_response, body).start()
        return True
    elif uri_path == '/data/load':
        # load the data
        Greenlet(self.data_load, worker, env, start_response, body).start()
        return True
    elif uri_path == '/calc/run':
        # run the calculations
        Greenlet(self.calc_run, worker, env, start_response, body).start()
        return True
    elif uri_path == '/shard/dump':
        # dump the results
        Greenlet(self.shard_dump, worker, env, start_response, body).start()
        return True
    else:
        return False

def check_stream(client, namespace, stream, start, end, limit, timeout, latency):
    def run():
        for event in client.get(stream, start, end, limit=limit, timeout=latency):
            # Yeah, I'm useless.
            pass

    read_greenlet = Greenlet(run)
    read_greenlet.start()
    read_greenlet.join(timeout)
    if not read_greenlet.ready():
        read_greenlet.kill()
        success = False
    else:
        success = read_greenlet.successful()
    return success

def __init__(self):
    """Initialize the worker manager thread."""
    Greenlet.__init__(self)

    # Initialize
    self.app = create_app()
    self.working_bots = {}
    self.credentials = []
    self.mutex = Lock()

    # Parse credentials from config
    bot_credentials_string = self.app.config['STEAM_BOTS']
    bot_credentials = bot_credentials_string.split('@')

    i = 0
    while i < len(bot_credentials):
        login = bot_credentials[i]
        password = bot_credentials[i + 1]
        self.credentials.append(Credential(login, password))
        i = i + 2

def __init__(self, api_clients_queue=None, resource_items_queue=None,
             db=None, config_dict=None, retry_resource_items_queue=None,
             api_clients_info=None):
    Greenlet.__init__(self)
    self.exit = False
    self.update_doc = False
    self.db = db
    self.config = config_dict
    self.api_clients_queue = api_clients_queue
    self.resource_items_queue = resource_items_queue
    self.retry_resource_items_queue = retry_resource_items_queue
    self.bulk = {}
    self.bulk_save_limit = self.config['bulk_save_limit']
    self.bulk_save_interval = self.config['bulk_save_interval']
    self.start_time = datetime.now()
    self.api_clients_info = api_clients_info

def _callback(val):
    # Get notified for i
    # Greenlet(callBackWrap(binary_consensus, callbackFactory(i)), pid,
    #     N, t, 1, make_bc(i), reliableBroadcastReceiveQueue[i].get).start()
    if i not in receivedChannelsFlags:
        receivedChannelsFlags.append(i)
        # mylog('B[%d]binary consensus_%d_starts with 1 at %f' % (pid, i, time.time()), verboseLevel=-1)
        greenletPacker(
            Greenlet(binary_consensus, i, pid, N, t, 1, decideChannel[i],
                     make_bc(i), reliableBroadcastReceiveQueue[i].get),
            'acs.callbackFactory.binary_consensus',
            (pid, N, t, Q, broadcast, receive)).start()

def on_post(self, req, resp):
    start = time.time()
    ret = {"success": False}
    method = req.get_param("method") or 'nd'
    pid = req.get_param("pid") or 'default'
    products = page_results.get_collection_from_ip_and_pid(req.env['REMOTE_ADDR'], pid)
    print "using products collection {0}".format(products)
    data = json_util.loads(req.stream.read())
    page_url = data.get("pageUrl")
    images = data.get("imageList")
    # attempt to filter bad urls
    images = filter(lambda url: all(list(urlparse(url))[:3]), images)
    try:
        if type(images) is list and page_url is not None:
            if method == 'pd':
                relevancy_dict = {url: page_results.handle_post(url, page_url, products, 'pd')
                                  for url in images}
                ret["success"] = True
                ret["relevancy_dict"] = relevancy_dict
            else:
                # DB CHECK, PARALLEL WITH gevent
                exists = {url: Greenlet.spawn(fast_results.check_if_exists, url, products, method)
                          for url in images}
                gevent.joinall(exists.values())
                relevancy_dict = {}
                images_to_rel_check = []
                # DIVIDE RESULTS INTO "HAS AN ANSWER" AND "WE DON'T KNOW THIS IMAGE"
                for url, green in exists.iteritems():
                    if green.value is not None:
                        relevancy_dict[url] = green.value
                    else:
                        images_to_rel_check.append(url)
                # RELEVANCY CHECK, LIOR'S POOLING
                inputs = [(image_url, page_url, products) for image_url in images_to_rel_check]
                outs = simple_pool.map(fast_results.check_if_relevant_and_enqueue, inputs)
                relevancy_dict.update({images_to_rel_check[i]: outs[i]
                                       for i in xrange(len(images_to_rel_check))})
                ret["success"] = True
                ret["relevancy_dict"] = relevancy_dict
        else:
            ret["success"] = False
            ret["error"] = "Missing image list and/or page url"
    except Exception as e:
        ret["error"] = traceback.format_exc()

    resp.data = json_util.dumps(ret)
    resp.content_type = 'application/json'
    resp.status = falcon.HTTP_200
    print "ON_POST took {0} seconds".format(time.time() - start)

def _broadcast(v):
    def _deliver(j):
        random_delay_binary_consensus.msgCount += 1
        tmpCount = random_delay_binary_consensus.msgCount
        mylog(bcolors.OKGREEN + "MSG: [%d] -[%d]-> [%d]: %s" % (i, tmpCount, j, repr(v)) + bcolors.ENDC)
        buffers[j].put((i, v))
        mylog(bcolors.OKGREEN + "     [%d] -[%d]-> [%d]: Finish" % (i, tmpCount, j) + bcolors.ENDC)
    for j in range(N):
        Greenlet(_deliver, j).start_later(random.random() * maxdelay)

def __init__(self, target=''):
    Greenlet.__init__(self)
    self.target = target
    r = requests.get(target)
    self.headers = r.headers
    self.content = r.content
    gevent.spawn(self.loadplugins).join()
    # run plugins '1' through '10' concurrently
    gevent.joinall([gevent.spawn(self.runplugin, str(n)) for n in range(1, 11)])

def _run_impl(self):
    self.sync = Greenlet(
        retry_with_logging,
        self.sync,
        account_id=self.account_id,
        provider=self.provider_name,
        logger=self.log,
    )
    self.sync.start()
    self.sync.join()
    if self.sync.successful():
        return self._cleanup()

    self.log.error(
        "mail sync should run forever",
        provider=self.provider_name,
        account_id=self.account_id,
        exc=self.sync.exception,
    )
    raise self.sync.exception

def api_test_server():
    # Initializing it without raiden_service.api here since that is a
    # function scope fixture. We will inject it to rest_api object later
    rest_api = RestAPI(None)
    api_server = APIServer(rest_api)
    # TODO: Find out why tests fail with debug=True
    g = Greenlet.spawn(api_server.run, 5001, debug=False, use_evalex=False)
    yield rest_api

    # At session teardown kill the greenlet
    g.kill(block=True, timeout=10)
    del rest_api
    del api_server

def run(self):
    def exec_function():
        try:
            result = self.function(*self.fargs, **self.fkwargs)
            self.result = result
            self._status = CONC_STATUS.success
        except Exception as e:
            self.exception = e
            self._status = CONC_STATUS.error

    self.t = Greenlet.spawn(exec_function)
    self._status = CONC_STATUS.running

def exec_in_parallel(functions_and_args):
    # Pass in functions, args and kwargs in the below format:
    # exec_in_parallel([(self.test, (val1, val2), {key3: val3})])
    greenlets = list()
    for fn_and_arg in functions_and_args:
        instance = SafeList(fn_and_arg)
        fn = instance[0]
        args = instance.get(1, set())
        kwargs = instance.get(2, dict())
        greenlets.append(Greenlet.spawn(fn, *args, **kwargs))
    gevent.sleep(0)
    return greenlets

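# Usage sketch for exec_in_parallel above: it returns the spawned greenlets
# without joining them, so the caller joins and collects values itself.
# fetch_page and parse_doc are hypothetical callables, not from the source.
greenlets = exec_in_parallel([
    (fetch_page, ('http://example.com',)),
    (parse_doc, (), {'strict': True}),
])
gevent.joinall(greenlets)
results = [g.value for g in greenlets]
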
def connect_to_channel(hostname, port):
    s = socks.socksocket()
    s.connect((hostname, port))
    q = Queue(1)

    def _handle():
        while True:
            obj = q.get()
            s.sendall(json.dumps(obj) + '\n')

    Greenlet(_handle).start()
    return q

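# Usage sketch for connect_to_channel above: the returned queue is the write
# side of the channel, and each object put on it is sent as one JSON line by
# the background greenlet. Host, port, and message shape are illustrative.
channel = connect_to_channel('127.0.0.1', 8001)
channel.put({'type': 'hello', 'seq': 1})
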
def __init__(self, api_clients_queue=None, resource_items_queue=None,
             db=None, archive_db=None, secret_archive_db=None,
             config_dict=None, retry_resource_items_queue=None,
             log_dict=None):
    Greenlet.__init__(self)
    self.exit = False
    self.update_doc = False
    self.db = db
    self.archive_db = archive_db
    self.secret_archive_db = secret_archive_db
    self.config = config_dict
    self.log_dict = log_dict
    self.api_clients_queue = api_clients_queue
    self.resource_items_queue = resource_items_queue
    self.retry_resource_items_queue = retry_resource_items_queue
    self.start_time = datetime.now()

def random_delay_binary_consensus(N, t, inputs):
    maxdelay = 0.01
    buffers = map(lambda _: Queue(1), range(N))
    random_delay_binary_consensus.msgCount = 0

    # Instantiate the "broadcast" instruction
    def makeBroadcast(i):
        def _broadcast(v):
            def _deliver(j):
                random_delay_binary_consensus.msgCount += 1
                tmpCount = random_delay_binary_consensus.msgCount
                mylog(bcolors.OKGREEN + "MSG: [%d] -[%d]-> [%d]: %s" % (i, tmpCount, j, repr(v)) + bcolors.ENDC)
                buffers[j].put((i, v))
                mylog(bcolors.OKGREEN + "     [%d] -[%d]-> [%d]: Finish" % (i, tmpCount, j) + bcolors.ENDC)
            for j in range(N):
                Greenlet(_deliver, j).start_later(random.random() * maxdelay)
        return _broadcast

    ts = []
    for i in range(N):
        bc = makeBroadcast(i)
        recv = buffers[i].get
        vi = inputs[i]  # random.randint(0, 1)
        decideChannel = Queue(1)
        th = Greenlet(binary_consensus, i, N, t, vi, decideChannel, bc, recv)
        th.start_later(random.random() * maxdelay)
        ts.append(th)

    gevent.joinall(ts)

    for key, item in globalState.items():
        if item != globalState[0]:
            mylog(bcolors.FAIL + 'Bad Consensus!' + bcolors.ENDC)
            print globalState