class RemoteIterator(object):
    def __init__(self):
        self.queue = Queue()

    def __iter__(self):
        return self

    def send(self, value):
        if self.queue is None:
            raise StopIteration
        self.queue.put((True, value))

    def throw(self, exc):
        if self.queue is None:
            raise StopIteration
        self.queue.put((False, exc))

    def close(self):
        self.throw(StopIteration)

    def next(self):
        if self.queue is None:
            raise StopIteration
        yields, value = self.queue.get()
        if yields:
            return value
        else:
            self.queue = None
            raise value
class FlowTests(TestCase):
    def create(self, conf={}, events=[]):
        self.input = Queue()
        self.output = Queue()
        context = DummyContext()
        with context:
            self.i = self.create_stage(**conf)
            self.input = self.i.setup(self.output)
        self.assertEquals(1, len(context.stages))
        self.i.start()
        for ev in events:
            self.input.put(ev)
        return self.i

    def wait(self, timeout=1.0, events=1):
        with gevent.Timeout(timeout):
            # wait for input to be consumed and output to be produced
            while self.input.qsize():
                gevent.sleep(0.0)
            while self.output.qsize() < events:
                gevent.sleep(0.0)
        self.i.stop()
        if events:
            return [self.output.get() for n in xrange(events)]
class WebSocketClient(WebSocketBaseClient):
    def __init__(self, url, protocols=None, extensions=None):
        WebSocketBaseClient.__init__(self, url, protocols, extensions)
        self._th = Greenlet(self.run)
        self.messages = Queue()

    def handshake_ok(self):
        self._th.start()

    def received_message(self, message):
        self.messages.put(copy.deepcopy(message))

    def closed(self, code, reason=None):
        # When the connection is closed, put a StopIteration
        # on the message queue to signal there's nothing left
        # to wait for
        self.messages.put(StopIteration)

    def receive(self):
        # If the websocket was terminated and there are no messages
        # left in the queue, return None immediately otherwise the client
        # will block forever
        if self.terminated and self.messages.empty():
            return None
        message = self.messages.get()
        if message is StopIteration:
            return None
        return message
class WebSocketClient(ThreadedClient):
    def __init__(self, url, protocols=None, version='8'):
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        ThreadedClient.__init__(self, url, protocols=protocols,
                                version=version, sock=sock)
        self._lock = Semaphore()
        self._th = Greenlet(self._receive)
        self._messages = Queue()
        self.extensions = []

    def opened(self, protocols, extensions):
        self.protocols = protocols
        self.extensions = extensions

    def received_message(self, m):
        self._messages.put(copy.deepcopy(m))

    def write_to_connection(self, bytes):
        if not self.client_terminated:
            return self.sock.sendall(bytes)

    def closed(self, code, reason=None):
        self._messages.put(StreamClosed(code, reason))

    def receive(self, msg_obj=False):
        msg = self._messages.get()
        if isinstance(msg, StreamClosed):
            return None
        if msg_obj:
            return msg
        else:
            return msg.data
class SystemClient(object):
    def __init__(self, socket, address, gateway, server=False):
        self.socket = socket
        self.address = address
        self.gateway = gateway
        self.server = server
        self.queue = Queue(maxsize=100)
        self.receiver = Receiver(self)
        self.sender = Sender(self)

    def start(self):
        self.receiver.start()
        self.sender.start()

    def kill(self):
        if gevent.getcurrent() in [self.receiver, self.sender]:
            logger.error("SystemClient.kill() may not be called by the client's greenlets!")
            self.gateway.unregister(self)
            return
        self.receiver.kill()
        self.sender.kill()
        self.socket.close()

    def send(self, message, sender):
        self.queue.put(message)

    def close(self, message=""):
        try:
            logger.warning(u"Closing connection to {0} due to {1}".format(self.address, message))
            self.gateway.unregister(self)
        except Exception, e:
            traceback.print_exc()
            logger.error(u"Got {0} while trying to close and unregister a client!".format(e))
class BlackBerryPushService(object):
    def __init__(self, app_id, password, push_url):
        self.app_id = app_id
        self.password = password
        self.push_url = push_url
        self._send_queue = Queue()
        self._send_queue_cleared = Event()
        self.log = logging.getLogger('pulsus.service.bbp')

    def _send_loop(self):
        self._send_greenlet = gevent.getcurrent()
        try:
            self.log.info("BlackBerry Push service started")
            while True:
                notification = self._send_queue.get()
                try:
                    self._do_push(notification)
                except Exception, e:
                    print e
                    self._send_queue.put(notification)
                    gevent.sleep(5.0)
        finally:
            if self._send_queue.qsize() < 1 and \
                    not self._send_queue_cleared.is_set():
                self._send_queue_cleared.set()
class AEntitySink(Greenlet, IASyncConsumer):
    """
    Abstract entity sink which is capable of consuming entities via both
    push and pull-based mechanisms.
    """

    def __init__(self, desc, maxQueueSize=None):
        Greenlet.__init__(self)
        self._desc = desc
        self._input = Queue(maxQueueSize)

    def put(self, item, block=True, timeout=None):
        """Inserts an item into this sink's queue"""
        self._input.put(item, block, timeout)

    def put_nowait(self, item):
        """Inserts an item into this sink's queue only if it would be
        non-blocking"""
        self._input.put_nowait(item)

    def _run(self):
        """Subclasses should override to process the pull-based loop in the
        context of this sink's Greenlet."""
        pass

    def processQueue(self, queue, async=True, poolSize=128):
class Transport(object):
    '''Greenlet-safe socket wrapper to emulate Twisted's Transport'''
    __slots__ = ('sock', 'queue', 'send_greenlet')

    def __init__(self, socket):
        self.sock = socket
        self.queue = Queue(maxsize=1000)  # maxsize just in case...
        self.send_greenlet = gevent.spawn(self._sendloop)

    def _sendloop(self):
        '''The need for this sendloop is that multiple greenlets may attempt
        to call `write` simultaneously. Thus, the call to `sock.sendall` must
        be protected. This could instead be achieved with a Semaphore, but
        that would perhaps block high priority greenlets. The approach here,
        using a send queue, seems to be the best way to keep faulty or slow
        sockets from affecting others.
        '''
        while True:
            self.sock.sendall(self.queue.get())
            gevent.sleep(0)  # cooperative yield

    def write(self, data):
        self.queue.put(data)

    def read(self, bytes):
        return self.sock.recv(bytes)

    def loseConnection(self):
        self.send_greenlet.kill()
        self.sock.close()
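A minimal usage sketch of the send-loop pattern above, assuming the Transport class just shown is importable; the socketpair and message names are illustrative only.

# Hypothetical sketch: several greenlets write through one Transport so that
# sendall calls never interleave; the send loop drains the queue in order.
import gevent
from gevent import socket

a, b = socket.socketpair()
transport = Transport(a)
for i in range(3):
    gevent.spawn(transport.write, ("msg %d\n" % i).encode())
gevent.sleep(0.1)            # let the send loop drain the queue
print(b.recv(1024))          # all three messages arrive intact
transport.loseConnection()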
class WebSocketHandler(ThreadedHandler):
    """WebSocket API for handlers

    This provides a socket-like interface similar to the browser
    WebSocket API for managing a WebSocket connection.
    """

    def __init__(self, sock, protocols, extensions, environ):
        ThreadedHandler.__init__(self, sock, protocols, extensions)
        self.environ = environ
        self._messages = Queue()
        self._lock = Lock()
        self._th = gevent.spawn(self._receive)

    def closed(self, code, reason=None):
        self._messages.put(StreamClosed(code, reason))

    def received_message(self, m):
        self._messages.put(copy.deepcopy(m))

    def receive(self, msg_obj=False):
        msg = self._messages.get()
        if isinstance(msg, StreamClosed):
            # Maybe we'll do something better
            return None
        if msg_obj:
            return msg
        else:
            return msg.data
class LocalControlProgramDescriptor(object):
    def __init__(self, hc, agentUuid, program_id):
        self.log = logging.getLogger('LocalControlProgramDescriptor_{}'.format(program_id))
        self.agentUuid = agentUuid
        self.id = program_id
        self.hc = hc
        self.queue = Queue()

    def _receive_msg(self, msg):
        self.queue.put(msg)

    def recv(self, block=True, timeout=None):
        try:
            self.log.debug("Waiting for msg in blocking call")
            msg = self.queue.get(block=block, timeout=timeout)
            return msg
        except gevent.timeout.Timeout as e:
            return None
        except gevent.queue.Empty as e:
            return None

    def send(self, msg):
        return self.hc.send_msg_to_local_control_program(self.id, msg, self.agentUuid)

    def close(self):
        return self.hc.stop_local_control_program(self.id, self.agentUuid)
class __AudioNode(gevent.Greenlet):
    RATE = 44100
    CHUNK = 512
    PORT = 20000

    def __init__(self, is_log=True):
        gevent.Greenlet.__init__(self)
        self.is_log = is_log
        self.command = Queue()

    def is_quit(self):
        try:
            cmd = self.command.get_nowait()
            return cmd == "q"
        except Empty:
            return False

    def stop(self, msg=""):
        self.command.put("q")
        if self.is_log:
            print "%s - stopping %s" % (self.__class__.__name__, msg)

    def _run(self):
        if self.is_log:
            print "starting: %s" % self.__class__.__name__
        try:
            self.engine()
        finally:
            self.pa.close()
            self.sock.close()
class Spider(object):
    """docstring for Spider"""

    def __init__(self, route, RDB):
        self.route = route
        self.visited = RDB  # must be a redis client
        self.todolst = Queue(100)

    def put(self, item):
        self.todolst.put(item)

    def _fetch(self, timeout):
        todo = self.todolst
        route = self.route
        visited = self.visited
        try:
            while True:
                url = todo.get(timeout=timeout)
                handler = route.match(url)
                if not handler:
                    continue
                hdl = handler(url)
                next_urls = hdl.get()
                visited.set(url, url)
                gevent.sleep(0.1)
                [todo.put(ul, timeout=timeout + 10) for ul in next_urls
                 if not (visited.exists(ul) or todo.full())]
        except (Empty, Full):
            # fix me
            traceback.print_exc()
            return
def get_subscriber(self, last_event_id=None):
    """Obtain a new subscriber.

    The returned subscriber will receive all messages after the one
    with the given index (if they are still in the cache).

    last_event_id (unicode): the ID of the last message the client did
        receive, to request the one generated since then to be sent
        again. If not given no past message will be sent.

    return (Subscriber): a new subscriber instance.
    """
    queue = Queue()
    # If a valid last_event_id is provided see if cache can supply
    # missed events.
    if last_event_id is not None and \
            re.match("^[0-9A-Fa-f]+$", last_event_id):
        last_event_key = int(last_event_id, 16)
        if len(self._cache) > 0 and last_event_key >= self._cache[0][0]:
            # All missed events are in cache.
            for key, msg in self._cache:
                if key > last_event_key:
                    queue.put(msg)
        else:
            # Some events may be missing. Ask to reinit.
            queue.put(b"event:reinit\n\n")
    # Store the queue and return a subscriber bound to it.
    self._sub_queues.add(queue)
    return Subscriber(queue)
def test_kill_run(self, datetime, kill_pid_tree_mock):
    """
    Test :func:`.kill_run`.
    """
    event_queue = Mock()
    kill_request = Mock()
    kill_request.id = 1234
    kill_request.run.pid = 5678
    dts = datetime.now.return_value.isoformat.return_value
    kill_queue = Queue()
    kill_queue.put(kill_request)
    exit_queue = Mock()
    exit_queue_return = [Empty, None]

    def exit_queue_side_effect(*args, **kwargs):
        value = exit_queue_return.pop(0)
        if callable(value):
            raise value()

    exit_queue.get.side_effect = exit_queue_side_effect

    kill_run(kill_queue, event_queue, exit_queue)

    kill_pid_tree_mock.assert_called_with(5678)
    kill_request.patch.assert_called_with({
        'execute_dts': dts,
    })
    event_queue.put.assert_called_with((
        '{"kill_request_id": 1234, "kind": "kill_request", '
        '"event": "executed"}'
    ))
class CHubCallbackQueueBase(CHubCallbackBasicBase):
    def __init__(self, sHubId):
        CHubCallbackBasicBase.__init__(self, sHubId)
        self.__queue4Return = Queue()  # current reply queue

    def PutCmdStrToReturnQueue(self, lsCmdStr):
        self.__queue4Return.put(lsCmdStr)

    def GetCmdStrFmReturnQueue(self):
        return self.__queue4Return.get()  # (sClientIPPort, dwCmdId, CmdOStr)

    def HandleRequestCmd(self, sClientIPPort, dwCmdId, CmdIStr):
        # Handle a request command from the client.
        bDone = CHubCallbackBasicBase.HandleRequestCmd(self, sClientIPPort, dwCmdId, CmdIStr)
        if not bDone and CmdIStr[0].startswith(CMD0_ECHO_CMD):
            CmdOStr = ['OK', 'CHubCallbackQueueBase']
            CmdOStr.extend(CmdIStr)
            dwCmdId = GetCmdReplyFmRequest(dwCmdId)
            self.PutCmdStrToReturnQueue([sClientIPPort, dwCmdId, CmdOStr])
            bDone = True
        return bDone

    def DoHandleCheckAllLinkReply(self):
        # Handle the reply messages (including notifications) returned from
        # checking all links. Once implemented here, subclasses generally do
        # not need to override this method.
        while not self.bQuitLoopFlag:
            return self.GetCmdStrFmReturnQueue()
class _ZMQOut(_ZMQ):
    """
    **A still-abstract implementation of _ZMQ base that is designed for an
    event being SENT over ZeroMQ**
    """

    def __init__(self, name, mode="connect", *args, **kwargs):
        super(_ZMQOut, self).__init__(name, mode=mode, *args, **kwargs)
        self.outbound_queue = Queue()

    def consume(self, event, *args, **kwargs):
        self.outbound_queue.put(event)

    def pre_hook(self):
        self.threads.spawn(self.__consume_outbound_queue)

    def __consume_outbound_queue(self):
        while self.loop():
            try:
                event = self.outbound_queue.get(timeout=2.5)
            except Exception:
                event = None
            if event is not None:
                try:
                    self.socket.send(pickle.dumps(event))
                except Exception as err:
                    self.logger.error("Unable to send event over ZMQ: {err}".format(err=err), event=event)
class WorkerPool(object):
    def __init__(self):
        self.pool_size = options.pool_size
        self.job_pool = Pool(size=self.pool_size)
        self.result = Queue()
        self.target_queue = Queue()

    def add_job(self, job_func, *args, **kwargs):
        job = self.job_pool.apply_async(
            job_func, args=args, kwds=kwargs, callback=self._call_func)
        self.job_pool.add(job)

    def run(self, timeout=None):
        self.job_pool.join(timeout=timeout, raise_error=False)

    def _call_func(self, job_ret):
        if job_ret:
            self.result.put(job_ret)

    def shutdown(self):
        self.job_pool.kill()
class MessagePump(object):
    def __init__(self, client):
        self._client = client
        self._queue = Queue(maxsize=15)
        self._greenlet = None
        self._rate_limit = RateLimiter(10, 10)

    def start(self):
        if not self._greenlet:
            self._greenlet = gevent.spawn(self._loop)

    def _loop(self):
        while True:
            next_message, async_result = self._queue.get()
            self._rate_limit.wait()
            try:
                result = self._client._create_message(**next_message)
                self._rate_limit.consume()
                async_result.set(result)
            except Exception as e:
                async_result.set_exception(e)

    def send(self, **kwargs):
        result = AsyncResult()
        self._queue.put((kwargs, result))
        return result.get()
class Spider:
    def __init__(self, url='', depth=1, threads=4):
        self.url = url
        self.depth = depth
        self.threads = threads
        self.tasks = Queue()
        self.bucket = []

    def run(self):
        self.tasks.put(Task(self.url, self.depth))
        threads = [
            gevent.spawn(self.worker)
            for i in range(self.threads)
        ]
        gevent.joinall(threads)

    def worker(self, worker_id=''):
        while not self.tasks.empty():
            task = self.tasks.get()
            if task.url in self.bucket:  # note: there is a known bug here
                continue
            self.bucket.append(task.url)
            task.run()
            for t in task.subtasks:
                self.tasks.put_nowait(t)
def handle(self, *args, **options):
    if not options['noinput']:
        confirm = raw_input("""
### %s Fast Reindex !!! ###
You have requested to do a fluff index reset via fast track.
This will update all your fluff indicators in place.

Type 'yes' to continue, or 'no' to cancel: """ % self.pillow_class.__name__)
        if confirm != 'yes':
            print "\tReset cancelled."
            return

    from gevent.monkey import patch_all
    patch_all()
    self._bootstrap(options)
    start = datetime.utcnow()

    gevent.signal(signal.SIGQUIT, gevent.shutdown)
    queue = Queue(POOL_SIZE)
    workers = [gevent.spawn(worker, self, queue) for i in range(POOL_SIZE)]

    print "Starting fast tracked reindexing"
    for i, row in enumerate(self.full_couch_view_iter()):
        queue.put((row, i))

    gevent.joinall(workers)
    end = datetime.utcnow()
    print "done in %s seconds" % (end - start).seconds
class IMapUnordered(Greenlet):
    def __init__(self, spawn, func, iterable):
        from gevent.queue import Queue
        Greenlet.__init__(self)
        self.spawn = spawn
        self.func = func
        self.iterable = iterable
        self.queue = Queue()
        self.count = 0

    def __iter__(self):
        return self.queue

    def _run(self):
        try:
            func = self.func
            for item in self.iterable:
                self.count += 1
                self.spawn(func, item).rawlink(self._on_result)
        finally:
            self.__dict__.pop('spawn', None)
            self.__dict__.pop('func', None)
            self.__dict__.pop('iterable', None)

    def _on_result(self, greenlet):
        self.count -= 1
        if greenlet.successful():
            self.queue.put(greenlet.value)
        if self.ready() and self.count <= 0:
            self.queue.put(StopIteration)
class _WebsocketHandler:
    """
    Websocket handler class. Holds some useful information, and provides a
    nice api to communicate with the main greenlet (_start_websocket)
    """

    def __init__(self, namespace):
        self.namespace = namespace
        self.message_queue = Queue()
        self.run = None
        self.greenlet = None
        self.is_running = False

    def spawn(self):
        self.greenlet = spawn(self.run, self)

    def get(self):
        return self.message_queue.get()

    def send(self, msg):
        _websocket_send_queue.put(msg)
        _websocket_send_event.set()

    def go(self, msg):
        self.message_queue.put(msg)
        if not self.is_running:
            self.spawn()

    def kill(self):
        if self.is_running:
            self.greenlet.kill()

    def run(self, *args, **kwargs):
        raise Exception("run method in WebsocketHandler must be overridden")
class Scheduler(object):
    """ Scheduler """

    def __init__(self):
        self.request_filter = RequestFilter()
        self.queue = Queue()

    def enqueue_request(self, request):
        """put request"""
        if not request.dont_filter \
                and self.request_filter.request_seen(request):
            logger.warn("ignore %s", request.url)
            return
        self.queue.put(request)

    def next_request(self):
        """next request"""
        if self.queue.empty():
            return None
        return self.queue.get()

    def __len__(self):
        return self.queue.qsize()
class Spider:
    def __init__(self, url='', depth=1):
        self.tasks = Queue()
        self.tasks.put(url)
        self.init_url = url or ''
        self.depth = depth or ''

    def run(self):
        threads = [
            gevent.spawn(self.work),
            gevent.spawn(self.work),
            gevent.spawn(self.work),
            gevent.spawn(self.work)
        ]
        gevent.joinall(threads)

    def work(self):
        while not self.tasks.empty():
            page = self.tasks.get()
            p = Page(page, '')
            p.do_request()
            p.parse_content()
            hrefs = p.hrefs
            for href in hrefs:
                self.tasks.put_nowait(href)
def test_publish(self, config):
    """
    Test :func:`.websocket`.
    """
    def config_side_effect(*args):
        return {
            ('job_runner_worker', 'ws_server_hostname'): 'localhost',
            ('job_runner_worker', 'ws_server_port'): 5555,
        }[args]

    config.get.side_effect = config_side_effect
    context = Mock()
    publisher = context.socket.return_value

    event_queue = Queue()
    event_queue.put('foo')
    event_queue.put('bar')
    exit_queue = Mock()

    publish(context, event_queue, exit_queue)

    self.assertEqual([
        call(['worker.event', 'foo']),
        call(['worker.event', 'bar']),
    ], publisher.send_multipart.call_args_list)
class C2DMService(object):
    def __init__(self, source, email, password):
        self.source = source
        self.email = email
        self.password = password
        self._send_queue = Queue()
        self._send_queue_cleared = Event()
        self.log = logging.getLogger('pulsus.service.c2dm')

    def _send_loop(self):
        self._send_greenlet = gevent.getcurrent()
        try:
            self.log.info("C2DM service started")
            while True:
                notification = self._send_queue.get()
                try:
                    self._do_push(notification)
                except Exception, e:
                    self.log.exception("Error while pushing")
                    self._send_queue.put(notification)
                    gevent.sleep(5.0)
        finally:
            if self._send_queue.qsize() < 1 and \
                    not self._send_queue_cleared.is_set():
                self._send_queue_cleared.set()
class StdIO(object):
    """Handles input and output from stdin/stdout."""

    def __init__(self):
        fcntl.fcntl(sys.stdin, fcntl.F_SETFL, os.O_NONBLOCK)
        fcntl.fcntl(sys.stdout, fcntl.F_SETFL, os.O_NONBLOCK)
        # The queue for stdin
        self.input = Queue()
        # The queue for stdout
        self.output = Queue()
        self._i = gevent.spawn(self._input)
        self._o = gevent.spawn(self._output)

    def _input(self):
        buff = ""
        while True:
            socket.wait_read(sys.stdin.fileno())
            buff += sys.stdin.read()
            while "\n" in buff:
                line, buff = buff.split("\n", 1)
                self.input.put(line)

    def _output(self):
        for line in self.output:
            sys.stdout.write(line + "\n")
            sys.stdout.flush()

    def stop(self):
        self._o.kill()
        self._i.kill()
class NotifyingQueue(Event):
    def __init__(self, maxsize=None, items=()):
        super().__init__()
        self._queue = Queue(maxsize, items)

    def put(self, item):
        """ Add new item to the queue. """
        self._queue.put(item)
        self.set()

    def get(self, block=True, timeout=None):
        """ Removes and returns an item from the queue. """
        value = self._queue.get(block, timeout)
        if self._queue.empty():
            self.clear()
        return value

    def peek(self, block=True, timeout=None):
        return self._queue.peek(block, timeout)

    def __len__(self):
        return len(self._queue)

    def copy(self):
        """ Copies the current queue items. """
        copy = self._queue.copy()
        result = list()
        while not copy.empty():
            result.append(copy.get_nowait())
        return result
class DataSource(Greenlet):
    def __init__(self, gdlist):
        Greenlet.__init__(self)
        self.gdlist = gdlist
        self.channel = Queue(100000)
        indexes = [i[0] for i in gdlist]
        cnt = [indexes.count(i) for i in xrange(10)]
        self.data_count = cnt

    def _run(self):
        cnt = self.data_count
        data = self.gdlist.pop(0)
        for idx, tag, retchannel in self.channel:
            if not self.gdlist or not cnt[idx]:
                retchannel.put(EndpointDied)

            if tuple(data[:2]) == (idx, tag):
                retchannel.put(data[2])
                data = self.gdlist.pop(0)
            else:
                log.info('!! Req:%s, Has:%s', repr((idx, tag)), repr(data[:2]))
                self.channel.put((idx, tag, retchannel))

    def ask_for_feed(self, player_index, tag, retchannel):
        self.channel.put((player_index, tag, retchannel))
        return retchannel.get()
class NotifyingQueue(Event):
    """ A queue that follows the wait protocol. """

    def __init__(self):
        super(NotifyingQueue, self).__init__()
        self._queue = Queue()

    def put(self, item):
        """ Add new item to the queue. """
        self._queue.put(item)
        self.set()

    def empty(self):
        return self._queue.empty()

    def get(self, block=True, timeout=None):
        """ Removes and returns an item from the queue. """
        value = self._queue.get(block, timeout)
        if self._queue.empty():
            self.clear()
        return value

    def stop(self):
        """ Request a stop event. """
        self.set()
def poc(url):
    # url = "http://www.example.org/default.html?ct=32&op=92&item=98"
    # --> http://www.example.org
    if url[:4] != "http":
        url = "http://" + url
    o = urlparse(url)
    url = o.scheme + "://" + o.netloc
    result = []
    payloads = Queue()
    with open(paths.DATA_PATH + '/source_leak_check_payload.txt') as f:
        for payload in f.read().splitlines():
            payloads.put(payload)
    # spawn 100 greenlets here; there are 144 payloads
    gevent.joinall([gevent.spawn(bak_scan, url, payloads, result) for i in range(100)])
    if result:
        return result
    else:
        return False
def put(self, item, block=True, timeout=None):
    """Put an item into the pipe.

    If optional arg *block* is true and *timeout* is ``None`` (the default),
    block if necessary until a free slot is available. If *timeout* is a
    positive number, it blocks at most *timeout* seconds and raises the
    :class:`Full` exception if no free slot was available within that time.
    Otherwise (*block* is false), put an item on the pipe if a free slot is
    immediately available, else raise the :class:`Full` exception (*timeout*
    is ignored in that case).

    :raises: :class:`PipeClosed` if the pipe is closed
    """
    if self.closed():
        raise PipeClosed
    Queue.put(self, item, block, timeout)
def test_execute_no_shebang(self, config, datetime, RunLog):
    """
    Test :func:`.execute_run` when the shebang is invalid.
    """
    config.get.return_value = '/tmp'
    run = Mock()
    run.run_log = None
    run.id = 1234
    run.job.script_content = (
        u'I love cheese\n\necho "H\xe9llo World!";\n')
    event_queue = Mock()
    exit_queue = Mock()
    run_queue = Queue()
    run_queue.put(run)
    exit_queue_return = [Empty, None]

    def exit_queue_side_effect(*args, **kwargs):
        value = exit_queue_return.pop(0)
        if callable(value):
            raise value()

    exit_queue.get.side_effect = exit_queue_side_effect

    execute_run(run_queue, event_queue, exit_queue)

    dts = datetime.now.return_value.isoformat.return_value
    self.assertEqual(dts, run.patch.call_args_list[0][0][0]['start_dts'])
    log_out = RunLog.return_value.post.call_args_list[0][0][0]['content']
    self.assertTrue(
        log_out.startswith('[job runner worker] Could not execute job:'))
    self.assertEqual(
        [call({
            'return_dts': dts,
            'return_success': False,
        })],
        run.patch.call_args_list[1:])
    self.assertEqual([
        call('{"kind": "run", "event": "started", "run_id": 1234}'),
        call('{"kind": "run", "event": "returned", "run_id": 1234}'),
    ], event_queue.put.call_args_list)
    datetime.now.assert_called_with(utc)
class WebServerTimeout():
    def __init__(self):
        self.q = Queue()
        self.wsgi = WSGIServer(('', 8088), self.application, log=None)

    def application(self, env, start_response):
        sleep(10)
        start_response('200 OK', [('Content-Type', 'text/html')])
        i = env["wsgi.input"].readlines()
        env["wsgi.input"] = i
        self.q.put(env)
        yield '{"message": "hello world!"}'

    def start(self):
        spawn(self.wsgi.start)

    def stop(self):
        self.wsgi.stop()
class TCPClient():
    def __init__(self):
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._group = Group()
        self._send_buffer = Queue()
        self._recv_buffer = Queue()

    def connect(self, address):
        self._socket.connect(address)
        self._group.spawn(self._recv_loop)
        self._group.spawn(self._send_loop)

    def disconnect(self):
        self._group.kill()
        self._socket.close()
        self._group.join()

    def _recv_loop(self):
        buf = ""
        u4 = lambda x: unpack("<I", x)[0]
        while True:
            data = self._socket.recv(8192)
            buf += data
            while len(buf) > 0:
                length = u4(buf[:4])
                if len(buf) - 4 < length:
                    break
                self._recv_buffer.put(buf[4:4 + length])
                buf = buf[4 + length:]

    def _send_loop(self):
        while True:
            data = self._send_buffer.get()
            self._socket.sendall(data)

    def write(self, data):
        self._send_buffer.put(data)

    def get_packet(self):
        return self._recv_buffer.get()
def newSign():
    pool = Pool(20)
    userlist = Queue()
    sql = r"update YunHui_sign set `is_sign` = 1 where user_id = %s and fid = %s"
    sql2 = r"update YunHui_user set flag = 2 where id = %s"
    users = getFliterUser(1)
    for user in users:
        bduss = user[1]
        tbs = getTBS(bduss)
        userid = user[0]
        userlist.put(userid)
        tiebas = getTiebas(user[0])
        for tieba in tiebas:
            print(tieba[1])
            pool.add(
                gevent.spawn(sign_one, userid, bduss, tieba[1], tieba[2], tbs))
    pool.join()
    to_mysql(sign_queue, sql)
    to_mysql(userlist, sql2)
class MemoryBufferedPlayable(BasePlayable, AbstractOpus):
    def __init__(self, other, *args, **kwargs):
        from gevent.queue import Queue

        super(MemoryBufferedPlayable, self).__init__(*args, **kwargs)
        self.frames = Queue()
        self.other = other
        gevent.spawn(self._buffer)

    def _buffer(self):
        while True:
            frame = self.other.next_frame()
            if not frame:
                break
            self.frames.put(frame)
        self.frames.put(None)

    def next_frame(self):
        return self.frames.get()
def test_execute_run(self, config, datetime, RunLog):
    """
    Test :func:`.execute_run`.
    """
    config.get.return_value = '/tmp'
    run = Mock()
    run.run_log = None
    run.id = 1234
    run.job.script_content = (
        u'#!/usr/bin/env bash\n\necho "H\xe9llo World!";\n')
    event_queue = Mock()
    exit_queue = Mock()
    run_queue = Queue()
    run_queue.put(run)
    exit_queue_return = [Empty, None]

    def exit_queue_side_effect(*args, **kwargs):
        value = exit_queue_return.pop(0)
        if callable(value):
            raise value()

    exit_queue.get.side_effect = exit_queue_side_effect

    execute_run(run_queue, event_queue, exit_queue)

    dts = datetime.now.return_value.isoformat.return_value
    self.assertTrue('pid' in run.patch.call_args_list[1][0][0])
    self.assertEqual(dts, run.patch.call_args_list[0][0][0]['start_dts'])
    self.assertEqual(
        u'H\xe9llo World!\n'.encode('utf-8'),
        RunLog.return_value.post.call_args_list[0][0][0]['content'])
    self.assertEqual([call({
        'return_dts': dts,
        'return_success': True,
    })], run.patch.call_args_list[2:])
    self.assertEqual([
        call('{"kind": "run", "event": "started", "run_id": 1234}'),
        call('{"kind": "run", "event": "returned", "run_id": 1234}'),
    ], event_queue.put.call_args_list)
    datetime.now.assert_called_with(utc)
class SocketClient(object):
    def __init__(self, connect_cb=None, shutdown_cb=None, receive_cb=None):
        self._sockfd = None
        self._recvbuff = ""
        self._sendqueue = Queue()
        self._work = False
        self._cbConnect = connect_cb
        self._cbShutdown = shutdown_cb
        self._cbReceive = receive_cb

    def connect(self, host, port):
        self._work = True
        self._sockfd = gevent.socket.socket()
        self._sockfd.connect((host, port))
        self._cbConnect()
        gevent.spawn(self._writer)
        gevent.spawn(self._reader)

    def shutdown(self):
        self._work = False
        self._sockfd.close()
        self._cbShutdown()

    def sendData(self, data):
        self._sendqueue.put(data)

    def _reader(self):
        while self._work:
            data = self._sockfd.recv(1024)
            if not data:
                self.shutdown()
            else:
                logging.debug("Recv size %d", len(data))
                self._cbReceive(data)
            # gevent.sleep(1)

    def _writer(self):
        while self._work:
            if not self._sendqueue.empty():
                data = self._sendqueue.get()
                self._sockfd.sendall(data)
                logging.debug("Send size %d", len(data))
            gevent.sleep(1)
class MonitoredFunc(object):
    def __init__(self, func):
        self.func = func
        self.out_queue = Queue()

    def __call__(self, *args, **kwargs):
        try:
            res = self.func(*args, **kwargs)
        except BaseException as e:
            # propagate exception to monitoring greenlet
            self.out_queue.put(e)
        self.out_queue.put(None)

    def catch_issues(self):
        # wait for end or exception
        while True:
            out = self.out_queue.get()
            if isinstance(out, BaseException):
                raise out
class ButianDownloader():
    def __init__(self):
        DIR_PATH = 'html2'
        if not os.path.isdir(DIR_PATH):
            print '[!] dir not exists'
            print '[*] making dir...'
            os.mkdir(DIR_PATH)
        os.chdir(DIR_PATH)
        self.u = "https://loudong.360.cn/Loo/index/p/{0}.html"
        self.f = "{0}.txt"
        self.q_u = Queue()
        self.q_f = Queue()
        for i in xrange(1, 4619):
            self.q_u.put(self.u.format(i))
            self.q_f.put(self.f.format(i))

    def fetch_urls_and_save_to_file(self, url_queue, file_queue, element):
        try:
            while not url_queue.empty():
                url = url_queue.get()    # take one url from url_queue
                file = file_queue.get()  # take one file name from file_queue
                doc = pq(url)
                ele = doc(element)       # get the matching element objects
                print "[*] " + url + " -> " + file
                # write the results to the file
                with open(file, 'w') as f:
                    count = 30
                    for j in ele.items(element):
                        f.write(j.text().encode('utf-8') + '\n')
                        count = count - 1
                        if count == 0:
                            break
        except KeyboardInterrupt:
            pass

    def start(self, element):
        # process all URLs with a few concurrent greenlets
        gevent_list = []
        for index in range(2):
            gevent_list.append(
                gevent.spawn(self.fetch_urls_and_save_to_file, self.q_u,
                             self.q_f, element))
        gevent.joinall(gevent_list)
class EndPoint(gevent.Greenlet):
    def __init__(self, svr, address):
        self.svr = svr
        self.address = address
        self.inbox = Queue()
        self.jobs = []
        gevent.Greenlet.__init__(self)

    def __str__(self):
        return "[endpoint:%r]" % (self.address, )

    def put_data(self, data):
        self.inbox.put(data)

    def on_data(self, data):
        """called when data received. (stripped the 4 bytes header)"""
        raise NotImplementedError()

    def send_data(self):
        """Override this method if a different packet framing is used."""
        while True:
            data = self.inbox.get()
            self.svr.sendto(data, self.address)

    def handle(self, datagram):
        """Override this method if a different packet framing is used."""
        self.on_data(datagram)

    def terminate(self):
        gevent.killall(self.jobs)
        self.kill()

    def _run(self):
        job_send = gevent.spawn(self.send_data)
        self.jobs.append(job_send)

        def _exit(glet):
            job_send.unlink(_exit)
            self.terminate()

        job_send.link(_exit)
def setup_bucket_sizes(self):
    """Determine the bucket (batch) size for every request."""
    args_queue = Queue()
    # Schedule the different jobs for each request
    for info in self.info_list:
        for finder in self.finders:
            if not finder.is_info_searchable(info):
                continue
            args_queue.put((finder.determine_bucket_size, info))
    # Start the workers
    workers = [
        SetBucketWorker(args_queue, self.logger)
        for _ in range(self.threads)
    ]
    greenlets = [gevent.spawn(worker.run) for worker in workers]
    # Wait until the work is done
    while any([worker.is_running() for worker in workers]) or args_queue.qsize():
        gevent.sleep(0)
    # Shut the workers down
    for worker in workers:
        worker.finish()
    # Wait for them to exit
    gevent.joinall(greenlets)
    # Record the bucket sizes
    for info in self.info_list:
        for finder in self.finders:
            if not finder.is_info_searchable(info):
                continue
            finder.set_bucket_size(info)
            self.logger.debug(
                f'{finder.__class__.__name__}: {info.origin_url} - bucket size {finder.get_bucket_size(info)}'
            )
class Hub:
    def __init__(self, num):
        self.need = 0
        self.block_queue = Queue(maxsize=1)
        self.total_player = []
        for n in range(num):
            self.total_player.append(n)
        self.semaphore = Semaphore()

    def _get_player(self, num):
        if len(self.total_player) < num:
            # block here
            self.need = num
            print "_get_player set need=%s" % self.need
            self.block_queue.get(block=True)
        assert len(self.total_player) >= num, (
            "_get_player error total_player=%s num=%s" %
            (len(self.total_player), num))
        size = len(self.total_player)
        player_list = self.total_player[size - num:]
        self.total_player = self.total_player[:size - num]
        return player_list

    def acquire_player(self, num):
        # lock
        self.semaphore.acquire()
        print "acquire_player num=%s" % num
        player_list = self._get_player(num)
        # unlock
        self.semaphore.release()
        return player_list

    def release_player(self, player_list):
        self.total_player.extend(player_list)
        print "release_player need=%s len(self.total_player)=%s" % (
            self.need, len(self.total_player))
        if self.need == 0 or self.need > len(self.total_player):
            return
        self.need = 0
        self.block_queue.put(1, block=True)
class SelectChan:
    '''
    SelectChan is a basic multiplexing channel based on the eventlet
    Queue class. All messages sent (put) on the queue are required to
    have a 'channel' which they are sent on. When we receive (get) a
    message the channel which it was sent on is returned.
    '''

    def __init__(self):
        self.queue = Queue()

    def get(self, *args, **kwargs):
        m = self.queue.get(*args, **kwargs)
        return m['channel'], m['message']

    def put(self, channel, message):
        m = {'channel': channel, 'message': message}
        self.queue.put(m)

    def fetch_chan(self, channel):
        q = SelectQueue(channel, self.queue)
        return q
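A brief usage sketch of the multiplexing idea above, assuming the SelectChan class just shown; the channel names and payloads are made up for illustration.

# Hypothetical sketch: one consumer demultiplexes two logical channels that
# share a single SelectChan instance.
chan = SelectChan()
chan.put('metrics', {'cpu': 0.4})
chan.put('logs', 'service started')

for _ in range(2):
    channel, message = chan.get()
    print("%s %r" % (channel, message))   # e.g. metrics {'cpu': 0.4}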
def test_clear_api_client_queue(self):
    queue = Queue()
    client_dict = {'id': uuid.uuid4().hex, 'client': None}
    client_dict2 = {'id': uuid.uuid4().hex, 'client': None}
    clients_info = {
        client_dict['id']: {
            'destroy': False
        },
        client_dict2['id']: {
            'destroy': True
        }
    }
    queue.put(client_dict)
    queue.put(client_dict2)
    self.assertEqual(queue.qsize(), 2)
    clear_api_client_queue(queue, clients_info)
    self.assertEqual(queue.qsize(), 1)
    client_dict_from_queue = queue.get()
    self.assertEqual(client_dict, client_dict_from_queue)
def newUpdate():
    sql = r"INSERT INTO YunHui_sign (`fid`,`name`,`level_id`,`cur_score`,`is_sign`,`user_id`) SELECT * from (select %s,%s, %s, %s,0,%s) as tmp WHERE NOT exists (select fid,user_id from YunHui_sign where fid = %s and user_id = %s) LIMIT 1"
    sql2 = r"update YunHui_user set flag = 1 where id = %s"
    db.autocommit(True)
    pool = Pool(20)
    userlist = Queue()
    # users whose followed-forum list has not been updated yet
    users = getFliterUser(0)
    if users == ():
        return 1
    for user in users:
        username = user[2]
        print(username)
        userid = user[0]
        userlist.put(userid)
        bduss = user[1]
        pool.add(gevent.spawn(update_one, userid, bduss))
    pool.join()
    to_mysql(update_queue, sql)
    to_mysql(userlist, sql2)
def _test_rbc1(N=4, f=1, leader=None, seed=None):
    # Test everything when runs are OK
    # if seed is not None: print 'SEED:', seed
    sid = 'sidA'
    rnd = random.Random(seed)
    router_seed = rnd.random()
    if leader is None:
        leader = rnd.randint(0, N - 1)
    sends, recvs = simple_router(N, seed=seed)
    threads = []
    leader_input = Queue(1)
    for i in range(N):
        input = leader_input.get if i == leader else None
        t = Greenlet(reliablebroadcast, sid, i, N, f, leader,
                     input, recvs[i], sends[i])
        t.start()
        threads.append(t)
    m = "Hello! This is a test message."
    leader_input.put(m)
    gevent.joinall(threads)
    assert [t.value for t in threads] == [m] * N
def test__get_resource_item_from_queue(self):
    items_queue = Queue()
    item = {
        'id': uuid.uuid4().hex,
        'dateModified': datetime.datetime.utcnow().isoformat()
    }
    items_queue.put(item)

    # Success test
    worker = ResourceItemWorker(resource_items_queue=items_queue,
                                config_dict=self.worker_config)
    self.assertEqual(worker.resource_items_queue.qsize(), 1)
    resource_item = worker._get_resource_item_from_queue()
    self.assertEqual(resource_item, item)
    self.assertEqual(worker.resource_items_queue.qsize(), 0)

    # Empty queue test
    resource_item = worker._get_resource_item_from_queue()
    self.assertEqual(resource_item, None)
    del worker
def start_tick(simulation_datetime, speed):
    global simulation_status
    tick_queue = Queue(3)
    db = MongoClient('mongodb://127.0.0.1:27017').trade_alarm
    finish_time = simulation_datetime.replace(hour=15, minute=30)
    simulation_status = STARTED
    deliver_greenlet = gevent.spawn(tick_sender, tick_queue, speed)
    stub.SetSimulationStatus(
        stock_provider_pb2.SimulationStatus(simulation_on=True,
                                            simulation_speed=speed))
    while simulation_datetime <= finish_time and simulation_status == STARTED:
        print('load data', simulation_datetime, 'data period seconds',
              AT_ONCE_SECONDS, 'real time', datetime.now())
        data = collect_db(
            db, simulation_datetime,
            simulation_datetime + timedelta(seconds=AT_ONCE_SECONDS))
        while True:
            try:
                tick_queue.put(data, True, 1)
                break
            except gevent.queue.Full as ge:
                if simulation_status != STARTED:
                    print('Queue Full and exit simulation')
                    break
        simulation_datetime += timedelta(seconds=AT_ONCE_SECONDS)
        gevent.sleep()
        print('load done', simulation_datetime, 'tick len', len(data),
              'real time', datetime.now())

    simulation_status = REQUEST_FINISH
    while not deliver_greenlet.dead:
        gevent.sleep(1)
    simulation_status = STOPPED
    stub.SetSimulationStatus(
        stock_provider_pb2.SimulationStatus(simulation_on=False,
                                            simulation_speed=speed))
def start_sync(host=DEFAULT_API_HOST, version=DEFAULT_API_VERSION,
               key=DEFAULT_API_KEY, extra_params=DEFAULT_API_EXTRA_PARAMS):
    """
    Start retrieving from Openprocurement API.

    :param:
        host (str): Url of Openprocurement API. Default is DEFAULT_API_HOST
        version (str): Version of Openprocurement API. Default is DEFAULT_API_VERSION
        key (str): Access key of broker in Openprocurement API. Default is
            DEFAULT_API_KEY (empty string)
        extra_params (dict): Extra params of query

    :returns:
        queue: Queue containing objects derived from the list of tenders
        forward_worker: Greenlet of forward worker
        backward_worker: Greenlet of backward worker
    """
    forward = TendersClientSync(key, host, version)
    backward = TendersClientSync(key, host, version)
    Cookie = forward.headers['Cookie'] = backward.headers['Cookie']
    backward_params = {'descending': True, 'feed': 'changes'}
    backward_params.update(extra_params)
    forward_params = {'feed': 'changes'}
    forward_params.update(extra_params)

    response = backward.sync_tenders(backward_params)

    queue = Queue()
    for tender in response.data:
        queue.put(tender)
    backward_params['offset'] = response.next_page.offset
    forward_params['offset'] = response.prev_page.offset

    backward_worker = spawn(retriever_backward, queue, backward, Cookie, backward_params)
    forward_worker = spawn(retriever_forward, queue, forward, Cookie, forward_params)

    return queue, forward_worker, backward_worker
class GeventActor(Greenlet):
    def __init__(self):
        super(GeventActor, self).__init__()
        self._inbox = Queue()
        self._running = False

    def tell(self, message):
        self._inbox.put(message)

    def on_receive(self, message):
        raise NotImplementedError()

    def _run(self):
        self._running = True
        while self._running:
            message = self._inbox.get()
            if not isinstance(message, Dokka.Messages.Message):
                continue
            if isinstance(message, Dokka.Messages.StopActor):
                self._running = False
            self.on_receive(message)
class BufferedOpusEncoderPlayable(BasePlayable, AbstractOpus, OpusEncoder):
    def __init__(self, source, *args, **kwargs):
        self.source = source
        self.frames = Queue(kwargs.pop('queue_size', 4096))
        super(BufferedOpusEncoderPlayable, self).__init__(*args, **kwargs)
        gevent.spawn(self._encoder_loop)

    def _encoder_loop(self):
        while self.source:
            raw = self.source.read(self.frame_size)
            if len(raw) < self.frame_size:
                break
            self.frames.put(self.encode(raw, self.samples_per_frame))
            gevent.idle()
        self.source = None

    def next_frame(self):
        if not self.source:
            return None
        return self.frames.get()
def test_412(self, gevent_sleep):
    gevent_sleep.side_effect = custom_sleep
    self.worker.kill()
    filtered_tender_ids_queue = Queue(10)
    filtered_tender_ids_queue.put('123')
    setup_routing(self.api_server_bottle, response_spore)
    setup_routing(self.api_server_bottle, generate_response, path='/api/2.3/tenders/123')
    client = TendersClientSync('', host_url='http://127.0.0.1:20604', api_version='2.3')
    # check that response_spore set cookies
    self.assertEqual(client.headers['Cookie'], 'SERVER_ID={}'.format(SPORE_COOKIES))
    worker = FilterTenders.spawn(client, filtered_tender_ids_queue, self.edrpou_codes_queue,
                                 self.process_tracker, MagicMock(), self.sleep_change_value)
    data = Data('123', '124', CODES[0], 'awards', {'meta': {'sourceRequests': ['125']}})
    for i in [data]:
        self.check_data_objects(self.edrpou_codes_queue.get(), i)
    # check that response_412 change cookies
    self.assertEqual(client.headers['Cookie'], 'SERVER_ID={}'.format(COOKIES_412))
    self.assertEqual(self.edrpou_codes_queue.qsize(), 0)
    self.assertItemsEqual(self.process_tracker.processing_items.keys(), ['123_124'])

    worker.shutdown()
    del worker
def stream(self):
    self.count += 1
    queue_id = self.count

    if not async_wrapper.enabled:
        # print('WARNING! Stream functionality will not work without gevent')
        return

    q = Queue()
    self.queues[self.count] = q

    try:
        # Return all ready events
        # yield 'stored', jsonify(self.session.events())

        # Return events as they are placed into the queues
        for item in q:
            yield item
    except GeneratorExit:
        self.queues.pop(queue_id)
        q.put(StopIteration)
class NotifyingQueue(Event):
    """ A queue that follows the wait protocol. """

    def __init__(self):
        super(NotifyingQueue, self).__init__()
        self._queue = Queue()

    def put(self, item):
        """ Add new item to the queue. """
        self._queue.put(item)
        self.set()

    def get(self, block=True, timeout=None):
        """ Removes and returns an item from the queue. """
        value = self._queue.get(block, timeout)
        if self._queue.empty():
            self.clear()
        return value

    def stop(self):
        """ Request a stop event. """
        self.set()
class Task(gevent.Greenlet):
    """
    Base class used to create tasks.

    Note: Always call super().__init__().
    """

    def __init__(self):
        super(Task, self).__init__()
        self.response_queue = Queue()

    def on_response(self, response):
        """
        Add a new response message to the task queue.
        """
        if log.isEnabledFor(logging.DEBUG):
            log.debug(
                'RESPONSE MESSAGE RECEIVED %s %s',
                repr(self),
                response,
            )
        self.response_queue.put(response)
class GreenletExecutor(AbstractExecutor):
    """
    GreenletExecutor is an AbstractExecutor subclass that uses a pool of
    greenlets to execute calls asynchronously.

    NOTE: Use this executor for I/O-bound tasks. Since all greenlets are
    multiplexed on a single pthread, do NOT use this for compute-bound
    callables. Try using the GIPCExecutor instead.
    """

    def __init__(self, num_greenlets=50, **kwargs):
        super(GreenletExecutor, self).__init__(**kwargs)
        self.pool = Pool(size=num_greenlets)
        self.task_queue = Queue()
        self.num_ready = 0

    def _shutdown(self):
        for _ in xrange(len(self.pool)):
            self.task_queue.put(None)
        if self.force_kill_on_shutdown:
            self.pool.kill()
        else:
            self.pool.join()

    def _worker_loop(self):
        try:
            self.num_ready += 1
            while True:
                self.num_ready -= 1
                task = self.task_queue.get()
                if task is None:
                    return
                task.execute()
                self.num_ready += 1
        except:
            pass

    def _submit(self, task):
        self.task_queue.put(task)
        if not self.num_ready and self.pool.free_count():
            self.pool.spawn(self._worker_loop)
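A self-contained sketch of the same lazy worker-spawning idea using only gevent primitives; the names here (tasks, pool, worker, submit) are illustrative and not taken from the snippet's library.

# Hypothetical sketch: workers are spawned on demand, pull callables from a
# shared queue, and exit when they receive a None sentinel.
import gevent
from gevent.pool import Pool
from gevent.queue import Queue

tasks = Queue()
pool = Pool(size=4)

def worker():
    while True:
        task = tasks.get()
        if task is None:          # sentinel: shut this worker down
            return
        task()                    # run the I/O-bound callable

def submit(task):
    tasks.put(task)
    if pool.free_count():         # spawn another worker only if the pool allows
        pool.spawn(worker)

submit(lambda: gevent.sleep(0.1))
for _ in range(len(pool)):
    tasks.put(None)               # tell each active worker to exit
pool.join()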
class NotifyingQueue(Event, Generic[T]):
    """This is not the same as a JoinableQueue. Here, instead of waiting for
    all the work to be processed, the wait is for work to be available.
    """

    def __init__(self, maxsize: int = None, items: Iterable[T] = ()) -> None:
        super().__init__()
        self.queue = Queue(maxsize, items)
        if items:
            self.set()

    def put(self, item: T) -> None:
        """ Add new item to the queue. """
        self.queue.put(item)
        self.set()

    def get(self, block: bool = True, timeout: float = None) -> T:
        """ Removes and returns an item from the queue. """
        value = self.queue.get(block, timeout)
        if self.queue.empty():
            self.clear()
        return value

    def peek(self, block: bool = True, timeout: float = None) -> T:
        return self.queue.peek(block, timeout)

    def __len__(self) -> int:
        return len(self.queue)

    def copy(self) -> List[T]:
        """ Copies the current queue items. """
        copy = self.queue.copy()
        result = list()
        while not copy.empty():
            result.append(copy.get_nowait())
        return result

    def __repr__(self) -> str:
        return f"NotifyingQueue(id={id(self)}, num_items={len(self.queue)})"
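A short usage sketch of the wait protocol the docstring describes, assuming the NotifyingQueue class just shown; because it subclasses Event, gevent.wait can block until work is available on it.

# Hypothetical sketch: block until the queue has an item, then consume it.
import gevent

queue = NotifyingQueue()
gevent.spawn_later(0.1, queue.put, "job")

ready = gevent.wait([queue], timeout=1)   # returns the objects that fired
if ready:
    print(queue.get())                    # -> "job"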
class IMapUnordered(Greenlet):
    def __init__(self, func, iterable, spawn=None):
        from gevent.queue import Queue
        Greenlet.__init__(self)
        if spawn is not None:
            self.spawn = spawn
        self.func = func
        self.iterable = iterable
        self.queue = Queue()
        self.count = 0
        self.rawlink(self._on_finish)

    def __iter__(self):
        return self

    def next(self):
        value = self.queue.get()
        if isinstance(value, Failure):
            raise value.exc
        return value

    if PY3:
        __next__ = next
        del next

    def _run(self):
        try:
            func = self.func
            empty = True
            for item in self.iterable:
                self.count += 1
                self.spawn(func, item).rawlink(self._on_result)
                empty = False
            if empty:
                self.queue.put(Failure(StopIteration))
        finally:
            self.__dict__.pop('spawn', None)
            self.__dict__.pop('func', None)
            self.__dict__.pop('iterable', None)

    def _on_result(self, greenlet):
        self.count -= 1
        if greenlet.successful():
            self.queue.put(greenlet.value)
        if self.ready() and self.count <= 0:
            self.queue.put(Failure(StopIteration))

    def _on_finish(self, _self):
        if not self.successful():
            self.queue.put(Failure(self.exception))
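For context, a small sketch of how a class like this is typically driven through gevent's public pool API; the fetch function and pool size are made up for illustration.

# Hypothetical sketch: imap_unordered yields results as greenlets finish,
# not in input order, which is exactly what the result queue above enables.
import gevent
from gevent.pool import Pool

def fetch(n):
    gevent.sleep(0.01 * (5 - n))
    return n * n

pool = Pool(3)
for result in pool.imap_unordered(fetch, range(5)):
    print(result)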