def __init__(self, name="QUEUE"):
    """Create an idle queue named *name*, with no pending or running items."""
    super(SRQueue, self).__init__()
    self.name = name
    # Work channels: prioritized input plus a plain queue for results.
    self.queue = PriorityQueue()
    self._result_queue = Queue()
    # Runtime state: items currently executing and control flags.
    self.processing = []
    self.min_priority = SRQueuePriorities.EXTREME
    self.amActive = False
    self.stop = False
def __init__(self, stream, address, server=None):
    """Bind this worker to an accepted *stream* from *address*."""
    self.stream = stream
    self.address = address
    self.server = server
    # Auth state starts cold; filled in once an AUTH message arrives.
    self.is_authed = False
    self.last_message = None
    # Pseudo-unique id derived from the creation timestamp.
    self.id = hash(datetime.now())
    self._queue = PriorityQueue()
    # NOTE(review): join() returns a Future that is discarded here, so this
    # does not actually block on the (empty) queue — confirm intent.
    self._queue.join()
    logger.debug('Worker for {} is initiated'.format(address))
def __init__(self, name="QUEUE"):
    """Create an idle queue named *name* backed by a UTC tornado scheduler."""
    super(SRQueue, self).__init__()
    self.name = name
    # APScheduler instance pinned to UTC so job times are unambiguous.
    self.scheduler = TornadoScheduler({'apscheduler.timezone': 'UTC'})
    # Work channels: prioritized input plus a plain queue for results.
    self.queue = PriorityQueue()
    self._result_queue = Queue()
    # Runtime state: queued snapshots, executing items, and control flags.
    self._queue_items = []
    self.processing = []
    self.min_priority = SRQueuePriorities.EXTREME
    self.amActive = False
    self.stop = False
class __Server:
    """Singleton exchange server: parses incoming order messages and matches
    them against the book of posted orders on a fixed interval."""

    def __init__(self):
        scheduler = IOLoopScheduler(IOLoop.current())
        self._app = Application([
            (r'/exchange', ExchangeHandler),
            (r'/', MainHandler),
        ])
        self.orders = PriorityQueue()   # orders awaiting processing
        self.posted_orders = []         # unmatched orders on the book
        self.fulfilled_orders = []      # matched orders, flattened pairs
        self.messages = Subject()

        # 'message' events carry a comma-separated payload; split it once and
        # share the resulting stream between subscribers via publish().
        only_messages = self.messages \
            .filter(lambda msg: msg[0] == 'message') \
            .map(lambda msg: msg[1].split(',')) \
            .publish()

        def queue_order(msg):
            # Parse and enqueue an incoming order.
            self.orders.put(Order.from_list(msg))

        only_messages \
            .filter(lambda msg: msg[0] == 'order') \
            .map(lambda msg: msg[1:]) \
            .subscribe(queue_order)

        def process_order(time):
            """Drain at most one order per tick; QueueEmpty means idle."""
            try:
                order = self.orders.get_nowait()
                print('processing order: {} [{}]'.format(
                    order, order.timestamp))
                # Linear scan of the book for the first matching posted order.
                matching = None
                for posted in self.posted_orders:
                    if posted.matches(order):
                        matching = posted
                        break
                if matching is None:
                    self.posted_orders.append(order)
                    print('could not find match, posted order count is {}'.
                          format(len(self.posted_orders)))
                else:
                    # Fix: refer to the captured match explicitly instead of
                    # relying on the leaked loop variable `posted`.
                    self.posted_orders.remove(matching)
                    self.fulfilled_orders.append(matching)
                    self.fulfilled_orders.append(order)
                    print('order fulfilled: {}'.format(order))
                    print('fulfilled by: {}'.format(matching))
            except QueueEmpty:
                pass

        # Poll the order queue every 100 ms on the IOLoop.
        Observable.interval(100, scheduler=scheduler).subscribe(process_order)
        only_messages.connect()

    def start(self):
        """Start serving the HTTP application on port 8888."""
        self._app.listen(8888)
def start(self):
    """register dispatchers for streams"""
    self.io_loop = ioloop.IOLoop.current()
    self.msg_queue = PriorityQueue()
    # The queue consumer must be scheduled before any on_recv callback fires.
    self.io_loop.add_callback(self.dispatch_queue)

    control = self.control_stream
    if control:
        control.on_recv(
            partial(self.schedule_dispatch, CONTROL_PRIORITY, self.dispatch_control),
            copy=False,
        )

    for stream in self.shell_streams:
        # The control stream may also be listed among the shell streams; it
        # was already wired above with control priority, so skip it here.
        if stream is control:
            continue
        stream.on_recv(
            partial(self.schedule_dispatch, SHELL_PRIORITY, self.dispatch_shell, stream),
            copy=False,
        )

    # publish idle status
    self._publish_status('starting')
def __init__(self, name=None, max_count=10, dt=0.5, client_set=None):
    """Create a client that emits up to *max_count* events, one every *dt* s."""
    # NOTE(review): declared but never assigned or read in this method.
    global clients_set
    # Fall back to a hash-based pseudo-unique name when none is given.
    self.name = name or hash(datetime.now())
    self.max_count = max_count
    self.dt = dt
    self.authorized = False
    self.count = 0
    # Registry of live clients.  A freshly created registry includes this
    # client; a shared one passed by the caller is used as-is (this client
    # does not add itself — presumably the caller does; verify).
    if client_set is None:
        self.client_set = set()
        self.client_set.add(self)
    else:
        self.client_set = client_set
    self._event_queue = PriorityQueue()
    # NOTE(review): join() returns a Future that is discarded, so this call
    # does not actually wait on the (empty) queue — confirm intent.
    self._event_queue.join()
    logger.debug('client start {}'.format(self.name))
    # Kick off the connection handshake and the producer loop on the IOLoop.
    IOLoop.current().spawn_callback(self.init)
    IOLoop.current().spawn_callback(self.start_produce)
def __init__(self, register, name=None):
    """Create a load-test client and schedule its producer/consumer loops."""
    # Bounded queue: producers block once queries_number items are pending.
    self._queue = PriorityQueue(maxsize=options.queries_number)
    self.queries_number = options.queries_number
    # Hard deadline after which both loops shut down.
    self.stop_time = datetime.now() + timedelta(seconds=options.test_time)
    self.register = register
    self.__count = 0        # queries produced so far
    self.__processed = 0    # queries consumed so far
    if register is not None:
        register.add(self)
    # Use a random UUID when the caller does not name the client.
    self.name = uuid4() if name is None else name
    loop = IOLoop.instance()
    loop.spawn_callback(self.consume)
    loop.spawn_callback(self.start)
class Client(object):
    """Simulated load-test client.

    Produces random read/write queries into a bounded priority queue and
    consumes them with a Worker until either the query count or the time
    budget is exhausted, then deregisters itself (stopping the IOLoop when
    the last client leaves).
    """

    def __init__(self, register, name=None):
        # Bounded queue: producer blocks once queries_number items pend.
        self._queue = PriorityQueue(maxsize=options.queries_number)
        self.queries_number = options.queries_number
        self.stop_time = datetime.now() + timedelta(seconds=options.test_time)
        self.register = register
        self.__count = 0        # queries produced so far
        self.__processed = 0    # queries consumed so far
        if register is not None:
            register.add(self)
        # Use a random UUID when the caller does not name the client.
        self.name = uuid4() if name is None else name
        io_loop = IOLoop.instance()
        io_loop.spawn_callback(self.consume)
        io_loop.spawn_callback(self.start)

    @gen.coroutine
    def start(self):
        """Produce all queries, then wait for the consumer to drain them."""
        yield self.produce()
        yield self._queue.join()

    @gen.coroutine
    def exit(self):
        """Deregister this client; stop the IOLoop once the last one leaves."""
        logger.debug('client {} exiting'.format(self.name))
        self.register.remove(self)
        if not self.register:
            IOLoop.instance().stop()
            logger.debug('all clients exited')
        else:
            logger.debug('{} clients left'.format(len(self.register)))

    @gen.coroutine
    def produce(self):
        """Enqueue random (priority, 'r'|'w') items until the query count or
        the deadline is reached, then grant the consumer a fresh deadline."""
        logger.debug('producer {} start'.format(self.name))
        while self.__count < self.queries_number and datetime.now() <= self.stop_time:
            # Choose read vs write according to the configured rw_ratio.
            p = randint(0, 100)
            res = 'w' if float(p) / 100. <= options.rw_ratio else 'r'
            yield self._queue.put((randint(0, 10), res))
            self.__count += 1
        logger.debug('producer {} finished'.format(self.name))
        # Give the consumer a fresh time budget to drain what was produced.
        self.stop_time = datetime.now() + timedelta(seconds=options.test_time)

    @gen.coroutine
    def consume(self):
        """Run a Worker per dequeued item until the count or time budget is
        exhausted, then exit."""
        logger.debug('consumer {} start'.format(self.name))
        while True:
            if datetime.now() > self.stop_time or self.__processed >= self.queries_number:
                logger.debug('{}'.format(datetime.now() > self.stop_time))
                break
            pr, tp = yield self._queue.get()
            w = Worker(self.name)
            # Yield to the IOLoop before the worker call.
            yield gen.sleep(0.00000)  # hack
            if tp == 'w':
                yield w.write_worker()
            elif tp == 'r':
                yield w.read_worker()
            self.__processed += 1
            self._queue.task_done()
        logger.debug('consumer {} finished'.format(self.name))
        yield gen.sleep(0.00000)  # hack
        yield self.exit()
class SRQueue(object):
    """Priority work queue that executes items on executor threads and can
    forcibly stop a running item by injecting an exception into its thread."""

    def __init__(self, name="QUEUE"):
        super(SRQueue, self).__init__()
        self.name = name
        self.queue = PriorityQueue()        # pending items, priority-ordered
        self._result_queue = Queue()        # shared result channel for items
        self.processing = []                # items currently executing
        self.min_priority = SRQueuePriorities.EXTREME
        self.amActive = False               # True while watch() is running
        self.stop = False                   # request shutdown once drained

    async def watch(self):
        """ Process items in this queue """
        self.amActive = True
        # Keep polling until a stop was requested AND the queue is drained.
        while not (self.stop and self.queue.empty()):
            # Only dispatch when unpaused and under the worker limit.
            if not self.is_paused and not len(self.processing) >= int(
                    sickrage.app.config.max_queue_workers):
                sickrage.app.io_loop.run_in_executor(None, self.worker, await self.get())
            await gen.sleep(1)
        self.amActive = False

    def worker(self, item):
        """Run one queue item on the current (executor) thread."""
        threading.currentThread().setName(item.name)
        # Record the thread id so stop_item() can target this thread.
        item.thread_id = threading.currentThread().ident
        try:
            item.is_alive = True
            self.processing.append(item)
            item.run()
        except QueueItemStopException:
            # Raised asynchronously by stop_item(); treated as a clean stop.
            pass
        except Exception:
            sickrage.app.log.debug(traceback.format_exc())
        finally:
            self.processing.remove(item)
            self.queue.task_done()

    async def get(self):
        """Await and return the next (highest-priority) item."""
        return await self.queue.get()

    async def put(self, item, *args, **kwargs):
        """
        Adds an item to this queue

        :param item: Queue object to add
        :return: item, or None when the queue is shutting down
        """
        if self.stop:
            return
        item.added = datetime.datetime.now()
        item.name = "{}-{}".format(self.name, item.name)
        item.result_queue = self._result_queue
        await self.queue.put(item)
        return item

    @property
    def queue_items(self):
        # NOTE(review): reaches into the queue's private _queue list to
        # combine pending and in-flight items.
        return self.queue._queue + self.processing

    @property
    def is_busy(self):
        # True when anything is pending or executing.
        return bool(len(self.queue_items) > 0)

    @property
    def is_paused(self):
        return self.min_priority == SRQueuePriorities.PAUSED

    def pause(self):
        """Pauses this queue"""
        sickrage.app.log.info("Pausing {}".format(self.name))
        self.min_priority = SRQueuePriorities.PAUSED

    def unpause(self):
        """Unpauses this queue"""
        sickrage.app.log.info("Un-pausing {}".format(self.name))
        self.min_priority = SRQueuePriorities.EXTREME

    def remove(self, item):
        """Remove *item* whether it is still pending or already executing."""
        if item in self.queue._queue:
            self.queue._queue.remove(item)
        elif item in self.processing:
            self.processing.remove(item)

    def stop_item(self, item):
        """Asynchronously raise QueueItemStopException in the item's thread."""
        if not item.is_alive:
            return
        # PyThreadState_SetAsyncExc returns the number of threads affected;
        # a value > 1 means the id matched multiple states, so the pending
        # exception must be cleared again to avoid corrupting other threads.
        if ctypes.pythonapi.PyThreadState_SetAsyncExc(
                ctypes.c_long(item.thread_id),
                ctypes.py_object(QueueItemStopException)) > 1:
            ctypes.pythonapi.PyThreadState_SetAsyncExc(item.thread_id, None)
from tornado.queues import PriorityQueue

# Demonstrate that a PriorityQueue yields entries smallest-first:
# each entry is a (priority, payload) tuple.
queue = PriorityQueue()
for entry in ((1, 'medium-priority item'),
              (0, 'high-priority item'),
              (10, 'low-priority item')):
    queue.put(entry)
for _ in range(3):
    print(queue.get_nowait())
import os
import smtplib
from concurrent.futures import ThreadPoolExecutor
from email.header import Header
from email.mime.text import MIMEText
from inspect import iscoroutinefunction

from tornado.concurrent import run_on_executor
from tornado.gen import coroutine, sleep
from tornado.queues import PriorityQueue, QueueEmpty

from tokit import Event, on, logger

# Module-wide queue of pending tasks; entries are (priority, task-spec).
tasks_queue = PriorityQueue()


def put(name, *args, priority=0, **kwargs):
    """
    Schedule a task with given params

    Handlers of event with same name will be used when execute task

    Example::

        @on('task_xyz')
        def do_something(arg1):
            pass

        put('task_xyz', 'val1')
    """
    # Lower priority numbers are served first; the dict payload carries
    # everything needed to dispatch to the registered handlers later.
    tasks_queue.put((priority, {'name': name, 'args': args, 'kwargs': kwargs}))
#!urs/bin/env python #coding:utf-8 # PriorityQueue:一个又优先级的Queue最小的最最优先。 # 写入条目通常是元组,类似(proority number, data)。 from tornado.queues import PriorityQueue q = PriorityQueue() q.put((1, 'medium-priority item')) q.put((0, 'high-priority item')) q.put((10, 'low-priority item')) print(q.get_nowait()) print(q.get_nowait()) print(q.get_nowait())
class Worker(object):
    """
    Description
    -----------
    Per-connection worker for stream processing: reads line-delimited
    messages from a TCP stream, handles authentication, and queues
    broadcasts to the server's websockets.
    """
    # Per-instance state; all re-assigned in __init__.
    stream = None
    address = None
    is_authed = None
    name = None
    id = None
    _queue = None

    def __init__(self, stream, address, server=None):
        self.stream = stream
        self.address = address
        self.is_authed = False
        self.last_message = None
        self.server = server
        # Pseudo-unique id derived from the creation timestamp.
        self.id = hash(datetime.now())
        self._queue = PriorityQueue()
        # NOTE(review): join() returns a Future that is discarded, so this
        # does not actually block here — confirm intent.
        self._queue.join()
        logger.debug('Worker for {} is initiated'.format(address))

    def run(self):
        """Arm the close callback and start the read loop."""
        self.stream.set_close_callback(lambda: self.on_close(hard=True))
        self._read_line()

    def on_close(self, hard=False):
        """Announce the close and reset auth state; on a hard close of an
        actually-closed stream, also deregister from the server."""
        # Low priority (300): the 'closed' broadcast can wait behind others.
        self._queue.put((300, self.sockets_broadcast('closed', self.name)))
        self.is_authed = False
        self.name = None
        if self.stream and \
                self.stream.closed() and \
                self in self.server._workers and \
                hard:
            self.server._workers.remove(self)
        logger.debug("worker for {} is closed".format(self.address))

    def on_auth(self, message):
        """Mark the worker authenticated under the name carried by *message*
        and announce it with high priority."""
        self.is_authed = True
        self.name = message.value
        self._queue.put((1, self.sockets_broadcast('opened', self.name)))

    @gen.coroutine
    def sockets_broadcast(self, label, message):
        """Fan a JSON event out to every websocket the server holds."""
        if self.server and self.is_authed:
            msg = {label: message, 'worker': self.name, 'id': self.id}
            msg = unicode(json.dumps(msg))  # Python 2 text type
            for socket in self.server.sockets:
                socket.write_message(msg)

    @gen.coroutine
    def _read_line(self):
        # Schedule a read up to the protocol's line terminator.
        self.stream.read_until(helpers.ENDING, self._handle_read)

    @gen.coroutine
    def _handle_read(self, data_):
        """Parse one raw line, dispatch on message type, then re-arm the
        read loop."""
        if not self.stream.closed():
            data = data_.rstrip()
            message = helpers.Message(data)
            logger.debug('[{}][{}][{}]'.format(self.address,
                                               unicode(message), data))
            # Normal payload messages are only broadcast once authenticated.
            if message.type == helpers.Message.MESSAGE and self.is_authed:
                self.last_message = message
                self._queue.put((100, self.sockets_broadcast('message',
                                                             unicode(message))))
            if not self.is_authed:
                if message.type == helpers.Message.AUTH:
                    self.on_auth(message)
            else:
                if message.type == helpers.Message.END:
                    self.on_close()
            self._read_line()
class Client(object):
    """TCP test client: authenticates against the server, then pushes up to
    *max_count* key/value messages, one every *dt* seconds."""

    # Per-instance state; all re-assigned in __init__.
    name = None
    conn = None
    max_count = None
    count = None
    dt = None
    _event_queue = None

    def __init__(self, name=None, max_count=10, dt=0.5, client_set=None):
        # NOTE(review): declared but never assigned or read in this method.
        global clients_set
        # Fall back to a hash-based pseudo-unique name when none is given.
        self.name = name or hash(datetime.now())
        self.max_count = max_count
        self.dt = dt
        self.authorized = False
        self.count = 0
        # Registry of live clients: a fresh registry includes this client;
        # a shared one passed by the caller is used as-is.
        if client_set is None:
            self.client_set = set()
            self.client_set.add(self)
        else:
            self.client_set = client_set
        self._event_queue = PriorityQueue()
        # NOTE(review): join() returns a discarded Future; it does not block.
        self._event_queue.join()
        logger.debug('client start {}'.format(self.name))
        # Connect and start producing on the IOLoop.
        IOLoop.current().spawn_callback(self.init)
        IOLoop.current().spawn_callback(self.start_produce)

    @gen.coroutine
    def init(self):
        """Open the TCP connection and send the AUTH handshake."""
        logger.debug('client init {}'.format(self.name))
        self.conn = yield TCPClient().connect(
            config.options.host,
            config.options.port_tcp
        )
        s = self.create_message(helpers.Message.AUTH)
        self.conn.write(s)

    @gen.coroutine
    def exit(self, force=False):
        """Send END, close the connection and deregister; stop the IOLoop
        when the last client leaves."""
        logger.debug('client exit {}'.format(self.name))
        if self.conn and not self.conn.closed():
            s = self.create_message(helpers.Message.END)
            # Forced exits jump the queue with priority 0.
            priority = 300 if not force else 0
            self._event_queue.put((priority, self.conn.write(s)))
            self._event_queue.put((priority, self.conn.close()))
        if self in self.client_set:
            self.client_set.remove(self)
        if not self.client_set:
            IOLoop.instance().stop()
        print len(self.client_set)

    @gen.coroutine
    def start_produce(self):
        """Emit one key/value message every *dt* seconds until max_count is
        reached, then exit."""
        logger.debug('client producing {}'.format(self.name))
        while True:
            yield gen.sleep(self.dt)
            if not self.conn.closed():
                value = self.count
                key = randint(1, 1000)
                s = self.create_message(key=key, value=value)
                yield self._event_queue.put((100, self.conn.write(s)))
                self.count += 1
            if self.count >= self.max_count:
                break
        yield self.exit()
        logger.debug('client stopped {}'.format(self.name))

    def create_message(self, type_=helpers.Message.MESSAGE, key=None, value=None):
        """Build the wire representation of an AUTH, END or MESSAGE frame."""
        res = None
        if type_ == helpers.Message.AUTH:
            res = helpers.get_auth_str(self.name)
        elif type_ == helpers.Message.END:
            res = helpers.get_end_str()
        elif type_ == helpers.Message.MESSAGE:
            res = helpers.format_string(key, value)
        return bytes(res)