Code Example #1
File: admin.py Project: domogik/domogik
def __init__(self):
    self.ctx = zmq.Context()
    self.WSmessages = Queue()
    self.MQmessages = Queue()
    self.sub = MQAsyncSub.__init__(self, self.ctx, 'admin', [])
    self.pub = MQPub(self.ctx, 'admin-ws')
    self.subscribers = set()
Code Example #2
File: test_plugin.py Project: dela3499/distributed
def test_diagnostic(s, a, b):
    sched, report = Queue(), Queue()
    s.handle_queues(sched, report)
    msg = yield report.get()
    assert msg['op'] == 'stream-start'

    class Counter(SchedulerPlugin):
        def start(self, scheduler):
            scheduler.add_plugin(self)
            self.count = 0

        def task_finished(self, scheduler, key, worker, nbytes):
            self.count += 1

    counter = Counter()
    counter.start(s)

    assert counter.count == 0
    sched.put_nowait({'op': 'update-graph',
                      'tasks': {'x': dumps_task((inc, 1)),
                                'y': dumps_task((inc, 'x')),
                                'z': dumps_task((inc, 'y'))},
                      'dependencies': {'y': ['x'], 'z': ['y']},
                      'keys': ['z']})

    while True:
        msg = yield report.get()
        if msg['op'] == 'key-in-memory' and msg['key'] == 'z':
            break

    assert counter.count == 3
Code Example #3
File: pubnub_tornado.py Project: pubnub/python
def __init__(self):
    self.connected = False
    self.connected_event = Event()
    self.disconnected_event = Event()
    self.presence_queue = Queue()
    self.message_queue = Queue()
    self.error_queue = Queue()
Code Example #4
File: test_widgets.py Project: aterrel/distributed
    def f(c, a, b):
        s = Scheduler((c.ip, c.port), loop=loop)
        yield s._sync_center()
        done = s.start()
        sched, report = Queue(), Queue()
        s.handle_queues(sched, report)
        msg = yield report.get()
        assert msg['op'] == 'stream-start'

        s.update_graph(dsk={'x-1': (inc, 1),
                            'x-2': (inc, 'x-1'),
                            'x-3': (inc, 'x-2'),
                            'y-1': (dec, 'x-3'),
                            'y-2': (dec, 'y-1'),
                            'e': (throws, 'y-2'),
                            'other': (inc, 123)},
                       keys=['e'])

        while True:
            msg = yield report.get()
            if msg['op'] == 'key-in-memory' and msg['key'] == 'y-2':
                break

        p = MultiProgressWidget(['x-1', 'x-2', 'x-3'], scheduler=s)
        assert set(concat(p.all_keys.values())).issuperset({'x-1', 'x-2', 'x-3'})
        assert 'x' in p.bars

        sched.put_nowait({'op': 'close'})
        yield done
Code Example #5
def __init__(self, max_concurrent_batches=10, block_on_send=False,
             block_on_response=False, max_batch_size=100, send_frequency=0.25,
             user_agent_addition=''):
    if not has_tornado:
        raise ImportError('TornadoTransmission requires tornado, but it was not found.')

    self.block_on_send = block_on_send
    self.block_on_response = block_on_response
    self.max_batch_size = max_batch_size
    self.send_frequency = send_frequency

    user_agent = "libhoney-py/" + VERSION
    if user_agent_addition:
        user_agent += " " + user_agent_addition

    self.http_client = AsyncHTTPClient(
        force_instance=True,
        defaults=dict(user_agent=user_agent))

    # libhoney adds events to the pending queue for us to send
    self.pending = Queue(maxsize=1000)
    # we hand back responses from the API on the responses queue
    self.responses = Queue(maxsize=2000)

    self.batch_data = {}
    self.sd = statsd.StatsClient(prefix="libhoney")
    self.batch_sem = Semaphore(max_concurrent_batches)
Code Example #6
File: test_widgets.py Project: aterrel/distributed
    def f(c, a, b):
        s = Scheduler((c.ip, c.port), loop=loop)
        yield s._sync_center()
        done = s.start()
        sched, report = Queue(), Queue()
        s.handle_queues(sched, report)
        msg = yield report.get()
        assert msg['op'] == 'stream-start'

        s.update_graph(dsk={'x-1': (inc, 1),
                            'x-2': (inc, 'x-1'),
                            'x-3': (inc, 'x-2'),
                            'y-1': (dec, 'x-3'),
                            'y-2': (dec, 'y-1'),
                            'e': (throws, 'y-2'),
                            'other': (inc, 123)},
                       keys=['e'])

        while True:
            msg = yield report.get()
            if msg['op'] == 'task-erred' and msg['key'] == 'e':
                break

        p = MultiProgressWidget(['e'], scheduler=s, complete=True)
        assert set(concat(p.all_keys.values())) == {'x-1', 'x-2', 'x-3', 'y-1',
                'y-2', 'e'}
        assert all(b.value == 1.0 for b in p.bars.values())
        assert p.texts['x'].value == '3 / 3'
        assert p.texts['y'].value == '2 / 2'

        sched.put_nowait({'op': 'close'})
        yield done
Code Example #7
    def f(c, a, b):
        s = Scheduler((c.ip, c.port), loop=loop)
        yield s._sync_center()
        done = s.start()
        sched, report = Queue(), Queue()
        s.handle_queues(sched, report)
        msg = yield report.get()
        assert msg['op'] == 'stream-start'

        s.update_graph(dsk={'x': (div, 1, 0)},
                       keys=['x'])
        progress = TextProgressBar(['x'], scheduler=s)
        progress.start()

        while True:
            msg = yield report.get()
            if msg.get('key') == 'x':
                break

        assert progress.status == 'error'
        assert not progress._timer.is_alive()

        progress = TextProgressBar(['x'], scheduler=s)
        progress.start()
        assert progress.status == 'error'
        assert not progress._timer or not progress._timer.is_alive()

        sched.put_nowait({'op': 'close'})
        yield done
Code Example #8
    def f(c, a, b):
        s = Scheduler((c.ip, c.port), loop=loop)
        yield s._sync_center()
        done = s.start()
        sched, report = Queue(), Queue()
        s.handle_queues(sched, report)
        msg = yield report.get()
        assert msg['op'] == 'stream-start'

        s.update_graph(dsk={'x': (inc, 1),
                            'y': (inc, 'x'),
                            'z': (inc, 'y')},
                       keys=['z'])
        progress = TextProgressBar(['z'], scheduler=s)
        progress.start()

        assert progress.all_keys == {'x', 'y', 'z'}
        assert progress.keys == {'x', 'y', 'z'}

        while True:
            msg = yield report.get()
            if msg['op'] == 'key-in-memory' and msg['key'] == 'z':
                break

        assert progress.keys == set()
        check_bar_completed(capsys)

        assert progress not in s.plugins

        sched.put_nowait({'op': 'close'})
        yield done
Code Example #9
File: py-server.py Project: beef9999/go-chatroom
def __init__(self, server, name, stream):
    self.server = server
    self.name = name
    self.rooms = {}
    self.stream = stream
    self.inqueue = Queue(maxsize=QUEUE_SIZE)
    self.outqueue = Queue(maxsize=QUEUE_SIZE)
Code Example #10
    def f(c, a, b):
        s = Scheduler((c.ip, c.port), loop=loop)
        yield s._sync_center()
        done = s.start()
        sched, report = Queue(), Queue()
        s.handle_queues(sched, report)
        msg = yield report.get()
        assert msg['op'] == 'stream-start'

        class Counter(SchedulerPlugin):
            def start(self, scheduler):
                scheduler.add_plugin(self)
                self.count = 0

            def task_finished(self, scheduler, key, worker, nbytes):
                self.count += 1

        counter = Counter()
        counter.start(s)

        assert counter.count == 0
        sched.put_nowait({'op': 'update-graph',
               'dsk': {'x': (inc, 1),
                       'y': (inc, 'x'),
                       'z': (inc, 'y')},
               'keys': ['z']})

        while True:
            msg = yield report.get()
            if msg['op'] == 'key-in-memory' and msg['key'] == 'z':
                break

        assert counter.count == 3

        sched.put_nowait({'op': 'close'})
        yield done
Code Example #11
    def f(c, a, b):
        s = Scheduler((c.ip, c.port), loop=loop)
        yield s._sync_center()
        done = s.start()
        sched, report = Queue(), Queue()
        s.handle_queues(sched, report)
        msg = yield report.get()
        assert msg['op'] == 'stream-start'

        class Bad(SchedulerPlugin):
            def task_finished(self, scheduler, key, worker, nbytes):
                raise Exception()

        bad = Bad()
        s.add_plugin(bad)

        sched.put_nowait({'op': 'update-graph',
                          'dsk': {'x': (inc, 1),
                                  'y': (inc, 'x'),
                                  'z': (inc, 'y')},
                          'keys': ['z']})

        while True:  # normal execution
            msg = yield report.get()
            if msg['op'] == 'key-in-memory' and msg['key'] == 'z':
                break

        sched.put_nowait({'op': 'close'})
        yield done
Code Example #12
File: scheduler.py Project: freeman-lab/distributed
    def __init__(self, center, delete_batch_time=1):
        self.scheduler_queue = Queue()
        self.report_queue = Queue()
        self.delete_queue = Queue()
        self.status = None

        self.center = coerce_to_rpc(center)

        self.dask = dict()
        self.dependencies = dict()
        self.dependents = dict()
        self.generation = 0
        self.has_what = defaultdict(set)
        self.held_data = set()
        self.in_play = set()
        self.keyorder = dict()
        self.nbytes = dict()
        self.ncores = dict()
        self.processing = dict()
        self.restrictions = dict()
        self.stacks = dict()
        self.waiting = dict()
        self.waiting_data = dict()
        self.who_has = defaultdict(set)

        self.exceptions = dict()
        self.tracebacks = dict()
        self.exceptions_blame = dict()

        self.delete_batch_time = delete_batch_time
Code Example #13
File: app.py Project: jefffm/swimpy
    def __init__(self, routes, node, pipe):
        """
        Application instantiates and registers handlers for each message type,
        and routes messages to the pre-instantiated instances of each message handler

        :param routes: list of tuples in the form of (<message type str>, <MessageHandler class>)
        :param node: Node instance of the local node
        :param pipe: Instance of multiprocessing.Pipe for communicating with the parent process
        """
        # We don't really have to worry about synchronization
        # so long as we're careful about explicit context switching
        self.nodes = {node.node_id: node}

        self.local_node = node
        self.handlers = {}

        self.tcpclient = TCPClient()

        self.gossip_inbox = Queue()
        self.gossip_outbox = Queue()

        self.sequence_number = 0

        if routes:
            self.add_handlers(routes)

        self.pipe = pipe
        self.ioloop = IOLoop.current()

        self.add_node_event = Event()
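The docstring above describes `routes` as a list of `(message type str, MessageHandler class)` tuples that the application pre-instantiates. A minimal, self-contained sketch of that registration pattern follows; `PingHandler` and the message shape are illustrative placeholders, not part of swimpy:

# Hedged sketch of the routing pattern described in the docstring above.
# PingHandler is a hypothetical handler, not a swimpy class.
class PingHandler(object):
    def handle(self, message):
        print("got ping: %s" % message)

routes = [('ping', PingHandler)]

# Pre-instantiate one handler per message type, as add_handlers would.
handlers = {message_type: handler_class()
            for message_type, handler_class in routes}

handlers['ping'].handle({'seqno': 1})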
Code Example #14
File: admin.py Project: ewintec/domogik
class Publisher(MQAsyncSub):
    """Handles new data to be passed on to subscribers."""
    def __init__(self):
        self.WSmessages = Queue()
        self.MQmessages = Queue()
        self.sub = MQAsyncSub.__init__(self, zmq.Context(), 'admin', [])
        self.subscribers = set()

    def register(self, subscriber):
        """Register a new subscriber."""
        self.subscribers.add(subscriber)

    def deregister(self, subscriber):
        """Stop publishing to a subscriber."""
        self.subscribers.remove(subscriber)

    @gen.coroutine
    def on_message(self, did, msg):
        """Receive message from MQ sub and send to WS."""
        yield self.WSmessages.put({"msgid": did, "content": msg})

    @gen.coroutine
    def submit(self, message):
        """Submit a new message to publish to subscribers."""
        yield self.WSmessages.put(message)

    @gen.coroutine
    def publishToWS(self):
        while True:
            message = yield self.WSmessages.get()
            if len(self.subscribers) > 0:
                print("Pushing MQ message {} to {} WS subscribers...".format(
                    message, len(self.subscribers)))
                yield [subscriber.submit(message) for subscriber in self.subscribers]

    @gen.coroutine
    def publishToMQ(self):
        ctx = zmq.Context()
        cli = MQSyncReq(ctx)
        pub = MQPub(ctx, 'admin')
        while True:
            message = yield self.MQmessages.get()
            jsons = json.loads(message)
            # req/rep
            if 'mq_request' in jsons and 'data' in jsons:
                msg = MQMessage()
                msg.set_action(str(jsons['mq_request']))
                msg.set_data(jsons['data'])
                print("REQ : {0}".format(msg.get()))
                if 'dst' in jsons:
                    print(cli.request(str(jsons['dst']), msg.get(), timeout=10).get())
                else:
                    print(cli.request('manager', msg.get(), timeout=10).get())
            # pub
            elif 'mq_publish' in jsons and 'data' in jsons:
                print("Publish : {0}".format(jsons['data']))
                pub.send_event(jsons['mq_publish'],
                               jsons['data'])
Code Example #15
@gen.coroutine  # assumed: run() yields on the queue below, so it must be a coroutine
def run(args):
    if not args.test:
        ip_iter = _create_ip_iterator()
    else:
        ip_iter = _get_test_ips()
        good_ips = []

    job_queue = Queue(maxsize=200)

    start = time.time()
    counter = Counter()

    @gen.coroutine
    def job_producer():
        for ip in ip_iter:
            yield job_queue.put(ip)
            #print("Put {}".format(ip))

    @gen.coroutine
    def worker(id):
        while True:
            ip = yield job_queue.get()
            try:
                good = yield test_ip(ip)
                counter['all'] += 1
                if args.progress:
                    if counter['all'] % 10000 == 0:
                        print("Tested {} ips.".format(counter['all']))
                if good:
                    print("Found good ip: {}".format(ip))
                    counter['good'] += 1
                    if not args.test:
                        yield record_good_ip(ip)
                    else:
                        good_ips.append(ip)
            finally:
                job_queue.task_done()

    for i in range(CONCURRENCY):
        worker(i)

    _disable_logging()

    try:
        yield job_producer()
        yield job_queue.join()
    finally:
        print("\n\nTested: {} ips\nFound {} good ips\nQps: {}".format(
            counter['all'],
            counter['good'],
            counter['all'] / (time.time() - start)
        ))

    if args.test and args.remove:
        with open(GOOD_IP_FILE + '_removed', 'w') as f:
            f.write('|'.join(good_ips))
Code Example #16
File: executor.py Project: canavandl/distributed
@gen.coroutine  # assumed: the body yields and raises gen.Return
def _first_completed(futures):
    """ Return a single completed future

    See Also:
        _as_completed
    """
    q = Queue()
    yield _as_completed(futures, q)
    result = yield q.get()
    raise gen.Return(result)
Code Example #17
File: batched.py Project: broxtronix/distributed
    def __init__(self, stream, interval):
        self.stream = stream
        self.interval = interval / 1000.0
        self.last_transmission = default_timer()
        self.send_q = Queue()
        self.recv_q = Queue()
        self._background_send_coroutine = self._background_send()
        self._background_recv_coroutine = self._background_recv()
        self._broken = None

        self.pc = PeriodicCallback(lambda: None, 100)
        self.pc.start()
Code Example #18
File: executor.py Project: cowlicks/distributed
    def __init__(self, center, start=True, delete_batch_time=1):
        self.center = coerce_to_rpc(center)
        self.futures = dict()
        self.refcount = defaultdict(lambda: 0)
        self.dask = dict()
        self.restrictions = dict()
        self.loop = IOLoop()
        self.report_queue = Queue()
        self.scheduler_queue = Queue()
        self._shutdown_event = Event()
        self._delete_batch_time = delete_batch_time

        if start:
            self.start()
Code Example #19
File: files.py Project: vizydrop/apps
    @gen.coroutine  # assumed: the body yields on the queue, so this is a coroutine
    def get_file_list(account, **kwargs):
        queue = Queue()
        sem = BoundedSemaphore(FETCH_CONCURRENCY)
        done, working = set(), set()
        data = set()

        @gen.coroutine
        def fetch_url():
            current_url = yield queue.get()
            try:
                if current_url in working:
                    return
                page_no = working.__len__()
                app_log.info("Fetching page {}".format(page_no))
                working.add(current_url)
                req = account.get_request(current_url)
                client = AsyncHTTPClient()
                response = yield client.fetch(req)
                done.add(current_url)
                app_log.info("Page {} downloaded".format(page_no))
                response_data = json.loads(response.body.decode('utf-8'))

                for file in response_data:
                    # be sure we're a valid file type and less than our maximum response size limit
                    extension = file['path'].lower().split('.')[-1]
                    if extension in VALID_FILETYPES and int(file['bytes']) < RESPONSE_SIZE_LIMIT * 1000000:
                        data.add((file['path'].lstrip('/'), file['path'], ))
                app_log.info("Page {} completed".format(page_no))
            finally:
                queue.task_done()
                sem.release()

        @gen.coroutine
        def worker():
            while True:
                yield sem.acquire()
                fetch_url()

        app_log.info("Gathering filelist for account {}".format(account._id))
        for file_type in VALID_FILETYPES:
            file_type = '.'.join([file_type])
            url = "https://api.dropbox.com/1/search/auto/?query={}&include_membership=true".format(file_type)
            queue.put(url)
        # start our concurrency worker
        worker()
        # wait until we're done
        yield queue.join(timeout=timedelta(seconds=MAXIMUM_REQ_TIME))
        app_log.info("Finished list retrieval. Found {} items.".format(data.__len__()))
        return sorted([{"title": title, "value": path} for title, path in data], key=lambda f: f['title'])
Code Example #20
File: executor.py Project: aterrel/distributed
def as_completed(fs):
    if len(set(f.executor for f in fs)) == 1:
        loop = first(fs).executor.loop
    else:
        # TODO: Groupby executor, spawn many _as_completed coroutines
        raise NotImplementedError(
            "as_completed on many event loops not yet supported")

    from .compatibility import Queue
    queue = Queue()

    coroutine = lambda: _as_completed(fs, queue)
    loop.add_callback(coroutine)

    for i in range(len(fs)):
        yield queue.get()
Code Example #21
File: tcpclient_test.py Project: leeclemens/tornado
def __init__(self, family):
    super(TestTCPServer, self).__init__()
    self.streams = []
    self.queue = Queue()
    sockets = bind_sockets(None, 'localhost', family)
    self.add_sockets(sockets)
    self.port = sockets[0].getsockname()[1]
Code Example #22
File: pubnub_tornado.py Project: pubnub/python
    def __init__(self, pubnub_instance):

        subscription_manager = self

        self._message_queue = Queue()
        self._consumer_event = Event()
        self._cancellation_event = Event()
        self._subscription_lock = Semaphore(1)
        # self._current_request_key_object = None
        self._heartbeat_periodic_callback = None
        self._reconnection_manager = TornadoReconnectionManager(pubnub_instance)

        super(TornadoSubscriptionManager, self).__init__(pubnub_instance)
        self._start_worker()

        class TornadoReconnectionCallback(ReconnectionCallback):
            def on_reconnect(self):
                subscription_manager.reconnect()

                pn_status = PNStatus()
                pn_status.category = PNStatusCategory.PNReconnectedCategory
                pn_status.error = False

                subscription_manager._subscription_status_announced = True
                subscription_manager._listener_manager.announce_status(pn_status)

        self._reconnection_listener = TornadoReconnectionCallback()
        self._reconnection_manager.set_reconnection_listener(self._reconnection_listener)
Code Example #23
File: tornao_send.py Project: DashShen/Journey
class TopicAppllication(tornado.web.Application):

    def __init__(self):
        handlers = [
            url(r'/', MainHandler)
        ]
        self.queue = Queue(maxsize=10)
        super(TopicAppllication, self).__init__(handlers=handlers, debug=True)

    @gen.coroutine
    def consumer(self):
        item = yield self.queue.get()
        try:
            print(item)
        finally:
            self.queue.task_done()
Code Example #24
File: handlers.py Project: mivade/tornadose
class BaseHandler(RequestHandler):
    """Base handler for subscribers. To be compatible with data stores
    defined in :mod:`tornadose.stores`, custom handlers should inherit
    this class and implement the :meth:`publish` method.

    """
    def initialize(self, store):
        """Common initialization of handlers happens here. If additional
        initialization is required, this method must either be called with
        ``super`` or the child class must assign the ``store`` attribute and
        register itself with the store.

        """
        assert isinstance(store, stores.BaseStore)
        self.messages = Queue()
        self.store = store
        self.store.register(self)

    @gen.coroutine
    def submit(self, message):
        """Submit a new message to be published."""
        yield self.messages.put(message)

    def publish(self):
        """Push a message to the subscriber. This method must be
        implemented by child classes.

        """
        raise NotImplementedError('publish must be implemented!')
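Since `publish` is deliberately left abstract above, a concrete subscriber has to drain `self.messages` and push each item to the client. A minimal sketch of one way to do that follows; the drain loop is illustrative, not tornadose's shipped implementation:

# Hedged sketch: a concrete subscriber built on the BaseHandler above.
# The drain loop is illustrative; tornadose's real handlers differ.
class PlainTextHandler(BaseHandler):
    @gen.coroutine
    def publish(self):
        while True:
            message = yield self.messages.get()
            try:
                self.write(message)
                yield self.flush()  # push the buffered bytes to the client
            finally:
                self.messages.task_done()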
Code Example #25
File: Server.py Project: rwth-i6/returnn
  def __init__(self, config_file):
    self.lock = locks.Lock()
    self.classification_queue = Queue()

    print('loading config %s' % config_file, file=log.v5)
    # Load and setup config
    try:
      self.config = Config.Config()
      self.config.load_file(config_file)
      self.pause_after_first_seq = self.config.float('pause_after_first_seq', 0.2)
      self.batch_size = self.config.int('batch_size', 5000)
      self.max_seqs = self.config.int('max_seqs', -1)
    except Exception:
      print('Error: loading config %s failed' % config_file, file=log.v1)
      raise

    try:
      self.devices = self._init_devices()
    except Exception:
      print('Error: Loading devices for config %s failed' % config_file, file=log.v1)
      raise

    print('Starting engine for config %s' % config_file, file=log.v5)
    self.engine = Engine.Engine(self.devices)
    try:
      self.engine.init_network_from_config(config=self.config)
    except Exception:
      print('Error: Loading network for config %s failed' % config_file, file=log.v1)
      raise

    IOLoop.current().spawn_callback(self.classify_in_background)

    self.last_used = datetime.datetime.now()
Code Example #26
File: tcpclient_test.py Project: bdarnell/tornado
def __init__(self, family):
    super(TestTCPServer, self).__init__()
    self.streams = []  # type: List[IOStream]
    self.queue = Queue()  # type: Queue[IOStream]
    sockets = bind_sockets(0, "localhost", family)
    self.add_sockets(sockets)
    self.port = sockets[0].getsockname()[1]
Code Example #27
def __init__(self, **conf):
    self.username = conf["username"]
    self.passwd = conf["passwd"]
    self.save_path = conf.get("save_path")
    self._q = Queue()
    self._cookies = self._get_user_cookies()
    self._parse_save_path()
Code Example #28
File: executor.py Project: aterrel/distributed
    def __init__(self, center=None, scheduler=None, start=True, delete_batch_time=1, loop=None):
        self.futures = dict()
        self.refcount = defaultdict(lambda: 0)
        self.loop = loop or IOLoop()
        self.scheduler_queue = Queue()
        self.report_queue = Queue()

        if scheduler:
            if isinstance(scheduler, Scheduler):
                self.scheduler = scheduler
                if not center:
                    self.center = scheduler.center
            else:
                raise NotImplementedError()
                # self.scheduler = coerce_to_rpc(scheduler)
        else:
            self.scheduler = Scheduler(center, loop=self.loop,
                                       delete_batch_time=delete_batch_time)
        if center:
            self.center = coerce_to_rpc(center)

        if not self.center:
            raise ValueError("Provide Center address")

        if start:
            self.start()
Code Example #29
File: py-server.py Project: beef9999/go-chatroom
class Room(object):

    def __init__(self, server, name):
        self.server = server
        self.name = name
        self.clients = {}
        self.lock = threading.RLock()
        self.inqueue = Queue(maxsize=QUEUE_SIZE)

    @coroutine
    def dispatch(self):
        logging.debug('Chatroom: %s opened' % self.name)
        while True:
            msg = yield self.inqueue.get()
            logging.debug("Room got message: room[%s], command[%s], content[%s]",
                          msg.receiver, msg.command, msg.content)
            if msg.command == COMMAND_JOIN:
                logging.debug("%s joined", msg.sender.name)
                self.clients[msg.sender.name] = msg.sender
            elif msg.command == COMMAND_QUIT:
                del self.clients[msg.sender.name]
            yield self.broadcast(msg)

    @coroutine
    def broadcast(self, msg):
        for _, client in self.clients.items():
            yield client.inqueue.put(msg)
Code Example #30
File: tcpclient_test.py Project: leeclemens/tornado
class TestTCPServer(TCPServer):
    def __init__(self, family):
        super(TestTCPServer, self).__init__()
        self.streams = []
        self.queue = Queue()
        sockets = bind_sockets(None, 'localhost', family)
        self.add_sockets(sockets)
        self.port = sockets[0].getsockname()[1]

    def handle_stream(self, stream, address):
        self.streams.append(stream)
        self.queue.put(stream)

    def stop(self):
        super(TestTCPServer, self).stop()
        for stream in self.streams:
            stream.close()
Code Example #31
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.queues import Queue

q = Queue(maxsize=2)


@gen.coroutine
def consumer():
    while True:
        item = yield q.get()
        try:
        print('handling %s' % item)
        finally:
            q.task_done()


@gen.coroutine
def producer():
    for item in range(5):
        yield q.put(item)
        print('Put %s' % item)


@gen.coroutine
def main():
    IOLoop.current().spawn_callback(consumer)
    yield producer()  # wait for the producer to enqueue all items
    yield q.join()    # wait for the consumer to finish all items
    print('Done')


IOLoop.current().run_sync(main)
Code Example #32
def init_queues(self):
    """ create the queues
    """
    self.from_lsp = Queue()
    self.to_lsp = Queue()
Code Example #33
# -*- coding: utf-8 -*-
import json
import logging as log

from tornado import gen
from tornado.queues import Queue
from tornado.web import RequestHandler

from decorator import xxx_except
from lib.mail import HtmlSmtpMail

queue = Queue(maxsize=128)

CREATE_EMAIL_OK = {'Code': 1000, 'Message': 'Create email ok!'}
CREATE_EMAIL_ERROR = {'Code': 2000, 'Message': 'Create email error!'}


class EmailHandler(RequestHandler):

    SUPPORTED_METHODS = ('POST', )

    @gen.coroutine
    @xxx_except(CREATE_EMAIL_ERROR)
    def post(self, *args):
        def get_chronos_job_state(subject):
            return subject.split()[-1].replace('!',
                                               '').replace('.',
                                                           '').capitalize()

        def get_chronos_job_message(state, message):
            if 'Failed' == state:
Code Example #34
File: conftest.py Project: xiefeifan/jupyterlab-lsp
def initialize(self, manager):
    super().initialize(manager)
    self._messages_wrote = Queue()
    self._ping_sent = False
Code Example #35
class LanguageServerSession(LoggingConfigurable):
    """ Manage a session for a connection to a language server
    """

    argv = List(
        trait=Unicode,
        default_value=[],
        help="the command line arguments to start the language server",
    )
    languages = List(
        trait=Unicode,
        default_value=[],
        help="the languages this session can provide language server features",
    )
    process = Instance(
        subprocess.Popen, help="the language server subprocess", allow_none=True
    )
    writer = Instance(stdio.LspStdIoWriter, help="the JSON-RPC writer", allow_none=True)
    reader = Instance(stdio.LspStdIoReader, help="the JSON-RPC reader", allow_none=True)
    from_lsp = Instance(
        Queue, help="a queue for string messages from the server", allow_none=True
    )
    to_lsp = Instance(
        Queue, help="a queue for string message to the server", allow_none=True
    )
    handlers = Set(
        trait=Instance(WebSocketHandler),
        default_value=[],
        help="the currently subscribed websockets",
    )
    status = UseEnum(SessionStatus, default_value=SessionStatus.NOT_STARTED)
    last_handler_message_at = Instance(datetime, allow_none=True)
    last_server_message_at = Instance(datetime, allow_none=True)

    _tasks = None

    def __init__(self, *args, **kwargs):
        """ set up the required traitlets and exit behavior for a session
        """
        super().__init__(*args, **kwargs)
        atexit.register(self.stop)

    def __repr__(self):  # pragma: no cover
        return "<LanguageServerSession(languages={}, argv={})>".format(
            self.languages, self.argv
        )

    def to_json(self):
        return dict(
            languages=self.languages,
            handler_count=len(self.handlers),
            status=self.status.value,
            last_server_message_at=self.last_server_message_at.isoformat()
            if self.last_server_message_at
            else None,
            last_handler_message_at=self.last_handler_message_at.isoformat()
            if self.last_handler_message_at
            else None,
        )

    def initialize(self):
        """ (re)initialize a language server session
        """
        self.stop()
        self.status = SessionStatus.STARTING
        self.init_queues()
        self.init_process()
        self.init_writer()
        self.init_reader()

        loop = asyncio.get_event_loop()
        self._tasks = [
            loop.create_task(coro())
            for coro in [self._read_lsp, self._write_lsp, self._broadcast_from_lsp]
        ]

        self.status = SessionStatus.STARTED

    def stop(self):
        """ clean up all of the state of the session
        """

        self.status = SessionStatus.STOPPING

        if self.process:
            self.process.terminate()
            self.process = None
        if self.reader:
            self.reader.close()
            self.reader = None
        if self.writer:
            self.writer.close()
            self.writer = None

        if self._tasks:
            [task.cancel() for task in self._tasks]

        self.status = SessionStatus.STOPPED

    @observe("handlers")
    def _on_handlers(self, change: Bunch):
        """ re-initialize if someone starts listening, or stop if nobody is
        """
        if change["new"] and not self.process:
            self.initialize()
        elif not change["new"] and self.process:
            self.stop()

    def write(self, message):
        """ wrapper around the write queue to keep it mostly internal
        """
        self.last_handler_message_at = self.now()
        self.to_lsp.put_nowait(message)

    def now(self):
        return datetime.now(timezone.utc)

    def init_process(self):
        """ start the language server subprocess
        """
        self.process = subprocess.Popen(
            self.argv, stdin=subprocess.PIPE, stdout=subprocess.PIPE
        )

    def init_queues(self):
        """ create the queues
        """
        self.from_lsp = Queue()
        self.to_lsp = Queue()

    def init_reader(self):
        """ create the stdout reader (from the language server)
        """
        self.reader = stdio.LspStdIoReader(
            stream=self.process.stdout, queue=self.from_lsp, parent=self
        )

    def init_writer(self):
        """ create the stdin writer (to the language server)
        """
        self.writer = stdio.LspStdIoWriter(
            stream=self.process.stdin, queue=self.to_lsp, parent=self
        )

    async def _read_lsp(self):
        await self.reader.read()

    async def _write_lsp(self):
        await self.writer.write()

    async def _broadcast_from_lsp(self):
        """ loop for reading messages from the queue of messages from the language
            server
        """
        async for msg in self.from_lsp:
            self.last_server_message_at = self.now()
            for handler in self.handlers:
                handler.write_message(msg)
            self.from_lsp.task_done()
Code Example #36
class FileSystemWatcher(object):
    def __init__(self,
                 watch_paths,
                 on_changed=None,
                 interval=1.0,
                 recursive=True):
        """Constructor.

        Args:
            watch_paths: A list of filesystem paths to watch for changes.
            on_changed: Callback to call when one or more changes to the watch path are detected.
            interval: The minimum interval at which to notify about changes (in seconds).
            recursive: Should the watch path be monitored recursively for changes?
        """
        if isinstance(watch_paths, basestring):
            watch_paths = [watch_paths]

        watch_paths = [os.path.abspath(path) for path in watch_paths]
        for path in watch_paths:
            if not os.path.exists(path) or not os.path.isdir(path):
                raise MissingFolderError(path)

        self.watch_paths = watch_paths
        self.interval = interval * 1000.0
        self.recursive = recursive
        self.periodic_callback = PeriodicCallback(self.check_fs_events,
                                                  self.interval)
        self.on_changed = on_changed
        self.observer = Observer()
        for path in self.watch_paths:
            self.observer.schedule(WatcherEventHandler(self), path,
                                   self.recursive)
        self.started = False
        self.fs_event_queue = Queue()

    def track_event(self, event):
        self.fs_event_queue.put(event)

    @gen.coroutine
    def check_fs_events(self):
        drained_events = []
        while self.fs_event_queue.qsize() > 0:
            drained_events.append(self.fs_event_queue.get_nowait())
        if len(drained_events) > 0 and callable(self.on_changed):
            logger.debug(
                "Detected %d file system change(s) - triggering callback" %
                len(drained_events))
            self.on_changed(drained_events)

    def start(self):
        if not self.started:
            self.observer.start()
            self.periodic_callback.start()
            self.started = True
            logger.debug("Started file system watcher for paths:\n%s" %
                         "\n".join(self.watch_paths))

    def shutdown(self, timeout=None):
        if self.started:
            self.periodic_callback.stop()
            self.observer.stop()
            self.observer.join(timeout=timeout)
            self.started = False
            logger.debug("Shut down file system watcher for path:\n%s" %
                         "\n".join(self.watch_paths))
Code Example #37
def __init__(self, event_callback, log):
    self.tcp_buffer = ''
    self._reset_tcp_pos()
    self.event_callback = event_callback
    self.message_queue = Queue()
    self.log = log
Code Example #38
    async def get(self, provider_prefix, _unescaped_spec):
        """Get a built image for a given spec and repo provider.

        Different repo providers will require different spec information. This
        function relies on the functionality of the tornado `GET` request.

        Parameters
        ----------
            provider_prefix : str
                the nickname for a repo provider (i.e. 'gh')
            spec:
                specifies information needed by the repo provider (i.e. user,
                repo, ref, etc.)

        """
        prefix = '/build/' + provider_prefix
        spec = self.get_spec_from_request(prefix)

        # set up for sending event streams
        self.set_header('content-type', 'text/event-stream')
        self.set_header('cache-control', 'no-cache')

        # Verify if the provider is valid for EventSource.
        # EventSource cannot handle HTTP errors, so we must validate and send
        # error messages on the eventsource.
        if provider_prefix not in self.settings['repo_providers']:
            await self.fail("No provider found for prefix %s" % provider_prefix
                            )
            return

        # create a heartbeat
        IOLoop.current().spawn_callback(self.keep_alive)

        spec = spec.rstrip("/")
        key = '%s:%s' % (provider_prefix, spec)

        # get a provider object that encapsulates the provider and the spec
        try:
            provider = self.get_provider(provider_prefix, spec=spec)
        except Exception as e:
            app_log.exception("Failed to get provider for %s", key)
            await self.fail(str(e))
            return

        if provider.is_banned():
            await self.emit({
                'phase': 'failed',
                'message': 'Sorry, {} has been temporarily disabled from '
                           'launching. Please contact admins for more info!'.format(spec)
            })
            return

        repo_url = self.repo_url = provider.get_repo_url()

        # labels to apply to build/launch metrics
        self.repo_metric_labels = {
            'provider': provider.name,
            'repo': repo_url,
        }

        try:
            ref = await provider.get_resolved_ref()
        except Exception as e:
            await self.fail("Error resolving ref for %s: %s" % (key, e))
            return
        if ref is None:
            await self.fail(
                "Could not resolve ref for %s. Double check your URL." % key)
            return

        # generate a complete build name (for GitHub: `build-{user}-{repo}-{ref}`)

        image_prefix = self.settings['image_prefix']

        # Enforces max 255 characters before image
        safe_build_slug = self._safe_build_slug(provider.get_build_slug(),
                                                limit=255 - len(image_prefix))

        build_name = self._generate_build_name(provider.get_build_slug(),
                                               ref,
                                               prefix='build-')

        image_name = self.image_name = '{prefix}{build_slug}:{ref}'.format(
            prefix=image_prefix, build_slug=safe_build_slug,
            ref=ref).replace('_', '-').lower()

        if self.settings['use_registry']:
            image_manifest = await self.registry.get_image_manifest(
                *'/'.join(image_name.split('/')[-2:]).split(':', 1))
            image_found = bool(image_manifest)
        else:
            # Check if the image exists locally!
            # Assume we're running in single-node mode or all binder pods are assigned to the same node!
            docker_client = docker.from_env(version='auto')
            try:
                docker_client.images.get(image_name)
            except docker.errors.ImageNotFound:
                # image doesn't exist, so do a build!
                image_found = False
            else:
                image_found = True

        # Launch a notebook server if the image already is built
        kube = self.settings['kubernetes_client']

        if image_found:
            await self.emit({
                'phase': 'built',
                'imageName': image_name,
                'message': 'Found built image, launching...\n'
            })
            with LAUNCHES_INPROGRESS.track_inprogress():
                await self.launch(kube, provider)
            self.event_log.emit(
                'binderhub.jupyter.org/launch', 3, {
                    'provider': provider.name,
                    'spec': spec,
                    'status': 'success',
                    'origin': (self.settings['normalized_origin']
                               if self.settings['normalized_origin']
                               else self.request.host)
                })
            return

        # Prepare to build
        q = Queue()

        if self.settings['use_registry']:
            push_secret = self.settings['push_secret']
        else:
            push_secret = None

        BuildClass = FakeBuild if self.settings.get('fake_build') else Build

        binder_url = '{proto}://{host}{base_url}v2/{provider}/{spec}'.format(
            proto=self.request.protocol,
            host=self.request.host,
            base_url=self.settings['base_url'],
            provider=provider_prefix,
            spec=spec,
        )
        resolved_spec = await provider.get_resolved_spec()
        persistent_binder_url = '{proto}://{host}{base_url}v2/{provider}/{spec}'.format(
            proto=self.request.protocol,
            host=self.request.host,
            base_url=self.settings['base_url'],
            provider=provider_prefix,
            spec=resolved_spec,
        )
        ref_url = await provider.get_resolved_ref_url()
        appendix = self.settings['appendix'].format(
            binder_url=binder_url,
            repo_url=repo_url,
            persistent_binder_url=persistent_binder_url,
            ref_url=ref_url,
        )

        self.build = build = BuildClass(
            q=q,
            api=kube,
            name=build_name,
            namespace=self.settings["build_namespace"],
            repo_url=repo_url,
            ref=ref,
            image_name=image_name,
            push_secret=push_secret,
            build_image=self.settings['build_image'],
            memory_limit=self.settings['build_memory_limit'],
            docker_host=self.settings['build_docker_host'],
            node_selector=self.settings['build_node_selector'],
            appendix=appendix,
            log_tail_lines=self.settings['log_tail_lines'],
            git_credentials=provider.git_credentials,
            sticky_builds=self.settings['sticky_builds'],
        )

        with BUILDS_INPROGRESS.track_inprogress():
            build_starttime = time.perf_counter()
            pool = self.settings['build_pool']
            # Start building
            submit_future = pool.submit(build.submit)
            # TODO: hook up actual error handling when this fails
            IOLoop.current().add_callback(lambda: submit_future)

            log_future = None

            # initial waiting event
            await self.emit({
                'phase': 'waiting',
                'message': 'Waiting for build to start...\n',
            })

            done = False
            failed = False
            while not done:
                progress = await q.get()

                # FIXME: If pod goes into an unrecoverable stage, such as ImagePullBackoff or
                # whatever, we should fail properly.
                if progress['kind'] == 'pod.phasechange':
                    if progress['payload'] == 'Pending':
                        # nothing to do, just waiting
                        continue
                    elif progress['payload'] == 'Deleted':
                        event = {
                            'phase': 'built',
                            'message': 'Built image, launching...\n',
                            'imageName': image_name,
                        }
                        done = True
                    elif progress['payload'] == 'Running':
                        # start capturing build logs once the pod is running
                        if log_future is None:
                            log_future = pool.submit(build.stream_logs)
                        continue
                    elif progress['payload'] == 'Succeeded':
                        # Do nothing, is ok!
                        continue
                    else:
                        # FIXME: message? debug?
                        event = {'phase': progress['payload']}
                elif progress['kind'] == 'log':
                    # We expect logs to be already JSON structured anyway
                    event = progress['payload']
                    payload = json.loads(event)
                    if payload.get('phase') == 'failure':
                        failed = True
                        BUILD_TIME.labels(
                            status='failure').observe(time.perf_counter() -
                                                      build_starttime)
                        BUILD_COUNT.labels(status='failure',
                                           **self.repo_metric_labels).inc()

                await self.emit(event)

        # Launch after building an image
        if not failed:
            BUILD_TIME.labels(status='success').observe(time.perf_counter() -
                                                        build_starttime)
            BUILD_COUNT.labels(status='success',
                               **self.repo_metric_labels).inc()
            with LAUNCHES_INPROGRESS.track_inprogress():
                await self.launch(kube, provider)
            self.event_log.emit(
                'binderhub.jupyter.org/launch', 3, {
                    'provider': provider.name,
                    'spec': spec,
                    'status': 'success',
                    'origin': (self.settings['normalized_origin']
                               if self.settings['normalized_origin']
                               else self.request.host)
                })

        # Don't close the eventstream immediately.
        # (javascript) eventstream clients reconnect automatically on dropped connections,
        # so if the server closes the connection first,
        # the client will reconnect which starts a new build.
        # If we sleep here, that makes it more likely that a well-behaved
        # client will close its connection first.
        # The duration of this shouldn't matter because
        # well-behaved clients will close connections after they receive the launch event.
        await gen.sleep(60)
Code Example #39
File: conftest.py Project: yunair/jupyterlab-lsp
def initialize(self, manager):
    super().initialize(manager)
    self._messages_wrote = Queue()
Code Example #40
#! /usr/bin/python2
# -*- coding: utf-8 -*-
import tornado
import tornado.ioloop
from tornado import gen
from tornado.queues import Queue
import tornado.web
import os, uuid, random, string, json
from rolling_shutter import unblockable_rolling_shutter

UPLOAD_DIR = "upload/"
BUF_SIZE = 4096
SID_LEN = 32

q = Queue()
task_list = []


def sid_gen():
    return ''.join(random.SystemRandom().choice(string.hexdigits)
                   for _ in range(SID_LEN))


def get_tasks_info(sid):
    tasks = []
    for task in task_list:
        if not task["sid"] == sid:
            continue
        tasks.append({
            "id": task["id"],
            "name": task["name"],
Code Example #41
class Debugger:

    # Requests that requires that the debugger has started
    started_debug_msg_types = [
        'dumpCell', 'setBreakpoints', 'source', 'stackTrace', 'variables',
        'attach', 'configurationDone'
    ]

    # Requests that can be handled even if the debugger is not running
    static_debug_msg_types = [
        'debugInfo', 'inspectVariables', 'richInspectVariables', 'modules'
    ]

    def __init__(self,
                 log,
                 debugpy_stream,
                 event_callback,
                 shell_socket,
                 session,
                 just_my_code=True):
        self.log = log
        self.debugpy_client = DebugpyClient(log, debugpy_stream,
                                            self._handle_event)
        self.shell_socket = shell_socket
        self.session = session
        self.is_started = False
        self.event_callback = event_callback
        self.just_my_code = just_my_code
        self.stopped_queue = Queue()

        self.started_debug_handlers = {}
        for msg_type in Debugger.started_debug_msg_types:
            self.started_debug_handlers[msg_type] = getattr(self, msg_type)

        self.static_debug_handlers = {}
        for msg_type in Debugger.static_debug_msg_types:
            self.static_debug_handlers[msg_type] = getattr(self, msg_type)

        self.breakpoint_list = {}
        self.stopped_threads = set()

        self.debugpy_initialized = False
        self._removed_cleanup = {}

        self.debugpy_host = '127.0.0.1'
        self.debugpy_port = 0
        self.endpoint = None

        self.variable_explorer = VariableExplorer()

    def _handle_event(self, msg):
        if msg['event'] == 'stopped':
            if msg['body']['allThreadsStopped']:
                self.stopped_queue.put_nowait(msg)
                # Do not forward the event now, will be done in the handle_stopped_event
                return
            else:
                self.stopped_threads.add(msg['body']['threadId'])
                self.event_callback(msg)
        elif msg['event'] == 'continued':
            if msg['body']['allThreadsContinued']:
                self.stopped_threads = set()
            else:
                self.stopped_threads.remove(msg['body']['threadId'])
            self.event_callback(msg)
        else:
            self.event_callback(msg)

    async def _forward_message(self, msg):
        return await self.debugpy_client.send_dap_request(msg)

    def _build_variables_response(self, request, variables):
        var_list = [
            var for var in variables if self.accept_variable(var['name'])
        ]
        reply = {
            'seq': request['seq'],
            'type': 'response',
            'request_seq': request['seq'],
            'success': True,
            'command': request['command'],
            'body': {
                'variables': var_list
            }
        }
        return reply

    def _accept_stopped_thread(self, thread_name):
        # TODO: identify Thread-2, Thread-3 and Thread-4. These are NOT
        # Control, IOPub or Heartbeat threads
        forbid_list = [
            'IPythonHistorySavingThread', 'Thread-2', 'Thread-3', 'Thread-4'
        ]
        return thread_name not in forbid_list

    async def handle_stopped_event(self):
        # Wait for a stopped event message in the stopped queue
        # This message is used for triggering the 'threads' request
        event = await self.stopped_queue.get()
        req = {
            'seq': event['seq'] + 1,
            'type': 'request',
            'command': 'threads'
        }
        rep = await self._forward_message(req)
        for t in rep['body']['threads']:
            if self._accept_stopped_thread(t['name']):
                self.stopped_threads.add(t['id'])
        self.event_callback(event)

    @property
    def tcp_client(self):
        return self.debugpy_client

    def start(self):
        if not self.debugpy_initialized:
            tmp_dir = get_tmp_directory()
            if not os.path.exists(tmp_dir):
                os.makedirs(tmp_dir)
            host, port = self.debugpy_client.get_host_port()
            code = 'import debugpy;'
            code += 'debugpy.listen(("' + host + '",' + port + '))'
            content = {'code': code, 'silent': True}
            self.session.send(self.shell_socket, 'execute_request', content,
                              None, (self.shell_socket.getsockopt(ROUTING_ID)))

            ident, msg = self.session.recv(self.shell_socket, mode=0)
            self.debugpy_initialized = msg['content']['status'] == 'ok'

        # Don't remove leading empty lines when debugging so the breakpoints are correctly positioned
        cleanup_transforms = get_ipython().input_transformer_manager.cleanup_transforms
        if leading_empty_lines in cleanup_transforms:
            index = cleanup_transforms.index(leading_empty_lines)
            self._removed_cleanup[index] = cleanup_transforms.pop(index)

        self.debugpy_client.connect_tcp_socket()
        return self.debugpy_initialized

    def stop(self):
        self.debugpy_client.disconnect_tcp_socket()

        # Restore remove cleanup transformers
        cleanup_transforms = get_ipython().input_transformer_manager.cleanup_transforms
        for index in sorted(self._removed_cleanup):
            func = self._removed_cleanup.pop(index)
            cleanup_transforms.insert(index, func)

    async def dumpCell(self, message):
        code = message['arguments']['code']
        file_name = get_file_name(code)

        with open(file_name, 'w', encoding='utf-8') as f:
            f.write(code)

        reply = {
            'type': 'response',
            'request_seq': message['seq'],
            'success': True,
            'command': message['command'],
            'body': {
                'sourcePath': file_name
            }
        }
        return reply

    async def setBreakpoints(self, message):
        source = message["arguments"]["source"]["path"]
        self.breakpoint_list[source] = message["arguments"]["breakpoints"]
        return await self._forward_message(message)

    async def source(self, message):
        reply = {
            'type': 'response',
            'request_seq': message['seq'],
            'command': message['command']
        }
        source_path = message["arguments"]["source"]["path"]
        if os.path.isfile(source_path):
            with open(source_path, encoding='utf-8') as f:
                reply['success'] = True
                reply['body'] = {'content': f.read()}
        else:
            reply['success'] = False
            reply['message'] = 'source unavailable'
            reply['body'] = {}

        return reply

    async def stackTrace(self, message):
        reply = await self._forward_message(message)
        # The stackFrames array can have the following content:
        # { frames from the notebook}
        # ...
        # { 'id': xxx, 'name': '<module>', ... } <= this is the first frame of the code from the notebook
        # { frames from ipykernel }
        # ...
        # {'id': yyy, 'name': '<module>', ... } <= this is the first frame of ipykernel code
        # or only the frames from the notebook.
        # We want to remove all the frames from ipykernel when they are present.
        try:
            sf_list = reply["body"]["stackFrames"]
            module_idx = len(sf_list) - next(
                i for i, v in enumerate(reversed(sf_list), 1)
                if v["name"] == "<module>" and i != 1)
            reply["body"]["stackFrames"] = reply["body"][
                "stackFrames"][:module_idx + 1]
        except StopIteration:
            pass
        return reply

    def accept_variable(self, variable_name):
        forbid_list = [
            '__name__', '__doc__', '__package__', '__loader__', '__spec__',
            '__annotations__', '__builtins__', '__builtin__', '__display__',
            'get_ipython', 'debugpy', 'exit', 'quit', 'In', 'Out', '_oh',
            '_dh', '_', '__', '___'
        ]
        cond = variable_name not in forbid_list
        cond = cond and not bool(re.search(r'^_\d', variable_name))
        cond = cond and variable_name[0:2] != '_i'
        return cond

    async def variables(self, message):
        reply = {}
        if not self.stopped_threads:
            variables = self.variable_explorer.get_children_variables(
                message['arguments']['variablesReference'])
            return self._build_variables_response(message, variables)
        else:
            reply = await self._forward_message(message)
            # TODO : check start and count arguments work as expected in debugpy
            reply['body']['variables'] = \
                [var for var in reply['body']['variables'] if self.accept_variable(var['name'])]
        return reply

    async def attach(self, message):
        host, port = self.debugpy_client.get_host_port()
        message['arguments']['connect'] = {'host': host, 'port': port}
        message['arguments']['logToFile'] = True
        # Experimental option to break in non-user code.
        # The ipykernel source is in the call stack, so the user
        # has to manipulate the step-over and step-into in a wise way.
        # Set debugOptions for breakpoints in python standard library source.
        if not self.just_my_code:
            message['arguments']['debugOptions'] = ['DebugStdLib']
        return await self._forward_message(message)

    async def configurationDone(self, message):
        reply = {
            'seq': message['seq'],
            'type': 'response',
            'request_seq': message['seq'],
            'success': True,
            'command': message['command']
        }
        return reply

    async def debugInfo(self, message):
        breakpoint_list = []
        for key, value in self.breakpoint_list.items():
            breakpoint_list.append({'source': key, 'breakpoints': value})
        reply = {
            'type': 'response',
            'request_seq': message['seq'],
            'success': True,
            'command': message['command'],
            'body': {
                'isStarted': self.is_started,
                'hashMethod': 'Murmur2',
                'hashSeed': get_tmp_hash_seed(),
                'tmpFilePrefix': get_tmp_directory() + os.sep,
                'tmpFileSuffix': '.py',
                'breakpoints': breakpoint_list,
                'stoppedThreads': list(self.stopped_threads),
                'richRendering': True,
                'exceptionPaths': ['Python Exceptions']
            }
        }
        return reply

    async def inspectVariables(self, message):
        self.variable_explorer.untrack_all()
        # It looks like the implementation of untrack_all in ptvsd
        # destroys objects we need in track(). We have no choice but to
        # reinstantiate the object.
        self.variable_explorer = VariableExplorer()
        self.variable_explorer.track()
        variables = self.variable_explorer.get_children_variables()
        return self._build_variables_response(message, variables)

    async def richInspectVariables(self, message):
        reply = {
            "type": "response",
            "sequence_seq": message["seq"],
            "success": False,
            "command": message["command"],
        }

        var_name = message["arguments"]["variableName"]
        valid_name = str.isidentifier(var_name)
        if not valid_name:
            reply["body"] = {"data": {}, "metadata": {}}
            if var_name == "special variables" or var_name == "function variables":
                reply["success"] = True
            return reply

        repr_data = {}
        repr_metadata = {}
        if not self.stopped_threads:
            # The code did not hit a breakpoint, so we use the interpreter
            # to get the rich representation of the variable
            result = get_ipython().user_expressions({var_name:
                                                     var_name})[var_name]
            if result.get("status", "error") == "ok":
                repr_data = result.get("data", {})
                repr_metadata = result.get("metadata", {})
        else:
            # The code has stopped on a breakpoint, so we use an evaluate
            # request to get the rich representation of the variable
            code = f"get_ipython().display_formatter.format({var_name})"
            frame_id = message["arguments"]["frameId"]
            seq = message["seq"]
            reply = await self._forward_message({
                "type": "request",
                "command": "evaluate",
                "seq": seq + 1,
                "arguments": {
                    "expression": code,
                    "frameId": frame_id
                },
            })
            if reply["success"]:
                repr_data, repr_metadata = eval(reply["body"]["result"], {},
                                                {})

        body = {
            "data": repr_data,
            "metadata":
            {k: v
             for k, v in repr_metadata.items() if k in repr_data},
        }

        reply["body"] = body
        reply["success"] = True
        return reply

    async def modules(self, message):
        modules = list(sys.modules.values())
        startModule = message.get('startModule', 0)
        moduleCount = message.get('moduleCount', len(modules))
        mods = []
        for i in range(startModule, moduleCount):
            module = modules[i]
            filename = getattr(getattr(module, '__spec__', None), 'origin',
                               None)
            if filename and filename.endswith('.py'):
                mods.append({
                    'id': i,
                    'name': module.__name__,
                    'path': filename
                })

        reply = {'body': {'modules': mods, 'totalModules': len(modules)}}
        return reply

    async def process_request(self, message):
        reply = {}

        if message['command'] == 'initialize':
            if self.is_started:
                self.log.info('The debugger has already started')
            else:
                self.is_started = self.start()
                if self.is_started:
                    self.log.info('The debugger has started')
                else:
                    reply = {
                        'command': 'initialize',
                        'request_seq': message['seq'],
                        'seq': 3,
                        'success': False,
                        'type': 'response'
                    }

        handler = self.static_debug_handlers.get(message['command'], None)
        if handler is not None:
            reply = await handler(message)
        elif self.is_started:
            handler = self.started_debug_handlers.get(message['command'], None)
            if handler is not None:
                reply = await handler(message)
            else:
                reply = await self._forward_message(message)

        if message['command'] == 'disconnect':
            self.stop()
            self.breakpoint_list = {}
            self.stopped_threads = set()
            self.is_started = False
            self.log.info('The debugger has stopped')

        return reply
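
# The frame-trimming expression in stackTrace() above is dense. Below is a
# hedged, standalone restatement of the same rule with invented frame names,
# so it can be verified in isolation: keep frames up to and including the
# first '<module>' frame that is not the outermost one, dropping the
# ipykernel frames that follow it.
def trim_stack_frames(sf_list):
    try:
        module_idx = len(sf_list) - next(
            i for i, v in enumerate(reversed(sf_list), 1)
            if v["name"] == "<module>" and i != 1)
        return sf_list[:module_idx + 1]
    except StopIteration:
        # Only notebook frames: the single '<module>' is the last frame.
        return sf_list

_frames = [
    {"id": 1, "name": "inner"},      # notebook frame
    {"id": 2, "name": "<module>"},   # first frame of the notebook code
    {"id": 3, "name": "run_cell"},   # ipykernel frame
    {"id": 4, "name": "<module>"},   # first frame of ipykernel code
]
assert [f["id"] for f in trim_stack_frames(_frames)] == [1, 2]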
コード例 #42
ファイル: publishers.py プロジェクト: hazmat345/brew-view
class TornadoPikaPublisher(BeergardenPublisher, PikaClient):
    def __init__(self, **kwargs):
        self.logger = logging.getLogger(__name__)

        self._shutdown_timeout = timedelta(
            seconds=kwargs.pop('shutdown_timeout', 5))
        self._work_queue = Queue()
        self._connection = None
        self._channel = None

        self.coroutiner = CoroutineMaker({
            'TornadoConnection': 'on_open_callback',
            'channel': 'on_open_callback'
        })

        # Trying to get super() to work with incompatible signatures is a nightmare
        BeergardenPublisher.__init__(self)
        PikaClient.__init__(self, **kwargs)

        IOLoop.current().spawn_callback(self._process)

    def shutdown(self):
        return self._work_queue.join(timeout=self._shutdown_timeout)

    @coroutine
    def _open_connection(self):
        self._connection = yield self.coroutiner.convert(TornadoConnection)(
            parameters=self._conn_params, stop_ioloop_on_close=False)

    @coroutine
    def _open_channel(self):
        self._channel = yield self.coroutiner.convert(
            self._connection.channel)()

    @coroutine
    def _process(self):

        while True:
            item = yield self._work_queue.get()

            try:
                if not self._connection or not self._connection.is_open:
                    yield self._open_connection()
                if not self._channel or not self._channel.is_open:
                    yield self._open_channel()

                yield getattr(self._channel, item[0])(**item[1])
            finally:
                self._work_queue.task_done()

    def publish(self, message, **kwargs):
        """Publish a message.

        :param message: The message to publish
        :param kwargs: Additional message properties
        :Keyword Arguments:
            * *routing_key* --
              Routing key to use when publishing
            * *headers* --
              Headers to be included as part of the message properties
            * *expiration* --
              Expiration to be included as part of the message properties
        :return: None
        """
        self._work_queue.put(('basic_publish', {
            'exchange': self._exchange,
            'routing_key': kwargs['routing_key'],
            'body': message,
            'properties': BasicProperties(app_id='beer-garden',
                                          content_type='text/plain',
                                          headers=kwargs.pop('headers', None),
                                          expiration=kwargs.pop('expiration', None))
        }))

    def _event_publish_args(self, event, **kwargs):

        # Main thing we need to do here is figure out the appropriate routing key
        args = {}
        if event.metadata and 'routing_key' in event.metadata:
            args['routing_key'] = event.metadata['routing_key']
        elif 'request' in kwargs:
            request = kwargs['request']
            args['routing_key'] = get_routing_key('request', request.system,
                                                  request.system_version,
                                                  request.instance_name)
        else:
            args['routing_key'] = 'beergarden'

        return args
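
# A minimal, self-contained sketch of the pattern TornadoPikaPublisher uses:
# producers enqueue (method_name, kwargs) work items on a tornado Queue, and a
# single consumer coroutine owns the connection, (re)opening it lazily before
# each operation. FakeChannel stands in for a pika channel; everything here is
# illustrative, not brew-view's actual wiring.
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.queues import Queue

work_queue = Queue()
channel = None

class FakeChannel:
    is_open = True
    def basic_publish(self, **kwargs):
        print('publish:', kwargs)

@gen.coroutine
def process():
    global channel
    while True:
        method, kwargs = yield work_queue.get()
        try:
            if channel is None or not channel.is_open:
                channel = FakeChannel()  # real code opens the connection/channel here
            getattr(channel, method)(**kwargs)
        finally:
            work_queue.task_done()

@gen.coroutine
def main():
    IOLoop.current().spawn_callback(process)
    yield work_queue.put(('basic_publish',
                          {'routing_key': 'beergarden', 'body': 'hello'}))
    yield work_queue.join()  # mirrors shutdown(): wait until the queue drains

IOLoop.current().run_sync(main)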
コード例 #43
#     pass
# if __name__ == '__main__':
#     count = 0
#
#     semaphore = asyncio.Semaphore(100)
#     loop = asyncio.get_event_loop()
#     url = 'https://www.baidu.com/s?ie=utf-8&f=8&rsv_bp=1&ch=&tn=baiduerr&bar=&wd={0}'
#     tasks = [get_http(url.format(i)) for i in range(600)]
#     loop.run_until_complete(asyncio.wait(tasks))
#     loop.close()

from tornado import gen
from tornado.ioloop import IOLoop
from tornado.queues import Queue

q = Queue(maxsize=100)

async def consumer():
    async for item in q:
        try:
            print('Doing work on %s' % item)
            await gen.sleep(0.01)
        finally:
            q.task_done()

async def producer():
    for item in range(5):
        await q.put(item)
        print('Put %s' % item)

async def main():
    # Start consumer without waiting (since it never finishes).
    IOLoop.current().spawn_callback(consumer)
    await producer()  # wait for the producer to put all items
    await q.join()    # wait for the consumer to finish all items
    # (main() was cut off in the source; the body above follows the
    # standard tornado producer/consumer example that this snippet mirrors)

IOLoop.current().run_sync(main)
コード例 #44
class HeartbeatConnection(object):
    """
    Establish a connection with atxserver2 and report the currently connected devices
    """
    def __init__(self, url="ws://*****:*****",
                 secret="", platform="android", priority=2):
        # NOTE: "*****" marks values masked in the source, which also
        # collapsed several lines here; the parameters and attributes
        # below are inferred from their use in _connect() and may
        # differ from the original.
        self._ws_url = url
        self._name = "provider"          # masked in source
        self._owner = "*****@nobody.io"  # masked in source
        self._provider_url = None        # masked in source
        self._secret = secret

        self._platform = platform
        self._priority = priority
        self._queue = Queue()
        self._db = defaultdict(dict)

    async def open(self):
        self._ws = await self.connect()
        IOLoop.current().spawn_callback(self._drain_ws_message)
        IOLoop.current().spawn_callback(self._drain_queue)

    async def _drain_queue(self):
        """
        Logic:
            - send message to server when server is alive
            - update local db
        """
        while True:
            message = await self._queue.get()
            if message is None:
                # None is a sentinel queued by _drain_ws_message after a
                # reconnect: resend the cached device states.
                logger.info("Resend messages: %s", self._db)
                for _, v in self._db.items():
                    await self._ws.write_message(v)
                continue

            if 'udid' in message:  # ping messages do not carry a udid
                udid = message['udid']
                update_recursive(self._db, {udid: message})
            self._queue.task_done()

            if self._ws:
                try:
                    await self._ws.write_message(message)
                    logger.debug("websocket send: %s", message)
                except TypeError as e:
                    logger.info("websocket write_message error: %s", e)

    async def _drain_ws_message(self):
        while True:
            message = await self._ws.read_message()
            logger.debug("WS read message: %s", message)
            if message is None:
                self._ws = None
                logger.warning("WS closed")
                self._ws = await self.connect()
                await self._queue.put(None)
            logger.info("WS receive message: %s", message)

    async def connect(self):
        """
        Returns:
            tornado.WebSocketConnection
        """
        cnt = 0
        while True:
            try:
                ws = await self._connect()
                cnt = 0
                return ws
            except Exception as e:
                cnt = min(30, cnt + 1)
                logger.warning("WS connect error: %s, reconnect after %ds", e,
                               cnt + 1)
                await gen.sleep(cnt + 1)

    async def _connect(self):
        ws = await websocket.websocket_connect(self._ws_url, ping_interval=3)
        ws.__class__ = SafeWebSocket

        await ws.write_message({
            "command": "handshake",
            "name": self._name,
            "owner": self._owner,
            "secret": self._secret,
            "url": self._provider_url,
            "priority": self._priority,  # the large the importanter
        })

        msg = await ws.read_message()
        logger.info("WS receive: %s", msg)
        return ws

    async def device_update(self, data: dict):
        """
        Args:
            data (dict) should contains keys
            - provider (dict: optional)
            - coding (bool: optional)
            - properties (dict: optional)
        """
        data['command'] = 'update'
        data['platform'] = self._platform

        await self._queue.put(data)

    async def ping(self):
        await self._ws.write_message({"command": "ping"})
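
# update_recursive() is referenced in _drain_queue() above but is not part of
# this excerpt. Below is a minimal sketch consistent with how it is used
# (merging a per-udid message into the local db without discarding sibling
# keys); the real helper may differ.
def update_recursive(target: dict, source: dict):
    for key, value in source.items():
        if isinstance(value, dict) and isinstance(target.get(key), dict):
            update_recursive(target[key], value)
        else:
            target[key] = value

_db = {"udid1": {"present": True, "colding": False}}
update_recursive(_db, {"udid1": {"colding": True}})
assert _db == {"udid1": {"present": True, "colding": True}}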
コード例 #45
def crawl_gevent(queue):
    url_list = []
    tasks = []
    i = 0
    while not queue.empty():
        url = queue.get()._result
        url_list.append(url)
        if len(url_list) == 250:
            i += 1
            tasks.append(gevent.spawn(crawl, url_list, i))
            url_list = []
    gevent.joinall(tasks)


if __name__ == '__main__':
    queue = Queue()
    urls = []
    with open("d:\\urls.txt") as fp:
        for url in fp:
            urls.append(url.strip())
    print("一共%s个url" % len(urls))
    for url in urls:
        queue.put(url)

    start = time.time()
    print("**********************开始计时**********************")
    crawl_gevent(queue)
    end = time.time()
    print("**********************结束计时**********************")
    print("总耗时:", end - start)
コード例 #46
import tornado.ioloop
import tornado.web
from tornado.concurrent import Future
from tornado import gen
from tornado.httpclient import HTTPClient, AsyncHTTPClient, HTTPRequest
from tornado.queues import Queue
from comet_secret import AUTH_SECRET

import json

waiters = []

msgQueue = Queue(maxsize=10)
msgBuffer = []
msgLastID = 0


def get_sid(requestHandler):
    sid = requestHandler.get_cookie('sessionid')
    return sid


@gen.coroutine
def send_message():
    '''Wait for a message to appear in the queue, then broadcast it to all waiters'''
    global waiters, msgLastID
    while 1:
        msg = yield msgQueue.get()
        msgBuffer.append(msg)
        print('Broadcasting message: ' + str(msg))
        for waiter in waiters:
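
# The broadcast loop above is cut off in the source. In tornado's classic
# long-polling ("comet") pattern, each entry in waiters is a Future created
# by a pending request handler, and the loop resolves them. A hedged sketch
# of that pattern (names and payload shape are assumptions, not the original
# code):
def broadcast(msg, waiters):
    for waiter in list(waiters):
        if not waiter.done():
            waiter.set_result([msg])  # wakes the handler awaiting this Future
    waiters.clear()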
コード例 #47
    async def get(self, provider_prefix, spec):
        """Get a built image for a given GitHub user, repo, and ref."""
        # We gonna send out event streams!
        self.set_header('content-type', 'text/event-stream')
        self.set_header('cache-control', 'no-cache')

        # EventSource cannot handle HTTP errors,
        # so we have to send error messages on the eventsource
        if provider_prefix not in self.settings['repo_providers']:
            await self.fail("No provider found for prefix %s" % provider_prefix)
            return

        IOLoop.current().spawn_callback(self.keep_alive)

        key = '%s:%s' % (provider_prefix, spec)

        try:
            provider = self.get_provider(provider_prefix, spec=spec)
        except Exception as e:
            app_log.exception("Failed to get provider for %s", key)
            await self.fail(str(e))
            return

        repo = self.repo = provider.get_repo_url()

        try:
            ref = await provider.get_resolved_ref()
        except Exception as e:
            await self.fail("Error resolving ref for %s: %s" % (key, e))
            return
        if ref is None:
            await self.fail("Could not resolve ref for %s. Double check your URL." % key)
            return
        build_name = self._generate_build_name(provider.get_build_slug(), ref).replace('_', '-')

        # FIXME: enforce a max of 255 characters for the image name and 128 for the tag
        image_name = self.image_name = '{prefix}{build_slug}:{ref}'.format(
            prefix=self.settings['docker_image_prefix'],
            build_slug=provider.get_build_slug(), ref=ref
        ).replace('_', '-').lower()

        if self.settings['use_registry']:
            image_manifest = await self.registry.get_image_manifest(*image_name.split('/', 1)[1].split(':', 1))
            image_found = bool(image_manifest)
        else:
            # Check if the image exists locally!
            # Assume we're running in single-node mode!
            docker_client = docker.from_env(version='auto')
            try:
                docker_client.images.get(image_name)
            except docker.errors.ImageNotFound:
                # image doesn't exist, so do a build!
                image_found = False
            else:
                image_found = True

        if image_found:
            await self.emit({
                'phase': 'built',
                'imageName': image_name,
                'message': 'Found built image, launching...\n'
            })
            await self.launch()
            return

        api = client.CoreV1Api()

        q = Queue()

        if self.settings['use_registry']:
            push_secret = self.settings['docker_push_secret']
        else:
            push_secret = None

        build = Build(
            q=q,
            api=api,
            name=build_name,
            namespace=self.settings["build_namespace"],
            git_url=repo,
            ref=ref,
            image_name=image_name,
            push_secret=push_secret,
            builder_image=self.settings['builder_image_spec'],
        )

        with BUILDS_INPROGRESS.track_inprogress():
            build_starttime = time.perf_counter()
            pool = self.settings['build_pool']
            pool.submit(build.submit)

            log_future = None

            # initial waiting event
            await self.emit({
                'phase': 'waiting',
                'message': 'Waiting for build to start...\n',
            })

            done = False
            failed = False
            while not done:
                progress = await q.get()

                # FIXME: If pod goes into an unrecoverable stage, such as ImagePullBackoff or
                # whatever, we should fail properly.
                if progress['kind'] == 'pod.phasechange':
                    if progress['payload'] == 'Pending':
                        # nothing to do, just waiting
                        continue
                    elif progress['payload'] == 'Deleted':
                        event = {
                            'phase': 'built',
                            'message': 'Built image, launching...\n',
                            'imageName': image_name,
                        }
                        done = True
                    elif progress['payload'] == 'Running':
                        # start capturing build logs once the pod is running
                        if log_future is None:
                            log_future = pool.submit(build.stream_logs)
                        continue
                    elif progress['payload'] == 'Succeeded':
                        # Do nothing, is ok!
                        continue
                    else:
                        # FIXME: message? debug?
                        event = {'phase': progress['payload']}
                elif progress['kind'] == 'log':
                    # We expect logs to be already JSON structured anyway
                    event = progress['payload']
                    payload = json.loads(event)
                    if payload.get('phase', None) == 'failure':
                        failed = True
                        BUILD_TIME.labels(status='failure').observe(time.perf_counter() - build_starttime)

                await self.emit(event)

        if not failed:
            BUILD_TIME.labels(status='success').observe(time.perf_counter() - build_starttime)
            with LAUNCHES_INPROGRESS.track_inprogress():
                await self.launch()
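
# Hedged sketch of the progress protocol consumed by the loop above: the
# Build object (run in a thread pool) posts dicts with a 'kind' of
# 'pod.phasechange' or 'log' onto the tornado Queue, and the handler drains
# them on the IO loop. The payload values below are illustrative, not
# binderhub's actual output.
from tornado.ioloop import IOLoop
from tornado.queues import Queue

async def demo():
    q = Queue()
    loop = IOLoop.current()
    for progress in (
        {'kind': 'pod.phasechange', 'payload': 'Pending'},
        {'kind': 'pod.phasechange', 'payload': 'Running'},
        {'kind': 'log', 'payload': '{"phase": "building"}'},
        {'kind': 'pod.phasechange', 'payload': 'Deleted'},
    ):
        # A real Build posts these from a worker thread via add_callback,
        # since tornado queues are not thread-safe.
        loop.add_callback(q.put_nowait, progress)
    while True:
        progress = await q.get()
        print(progress)
        if progress.get('payload') == 'Deleted':
            break

IOLoop.current().run_sync(demo)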
コード例 #48
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.queues import Queue
import time
q = Queue(maxsize=500)

async def consumer():
    print("10")
    async for item in q:
        try:
            print('Doing work on %s' % item)
            #await gen.sleep(0.01)
        finally:
            # task_done decrements the count of unfinished items
            q.task_done()


async def producer():
    print("11")
    for item in range(50):
        # put increments the count of unfinished items
        await q.put(item)
        print('Put %s' % item)
    print("队伍数量producer",q.qsize())


async def main():
    # Start consumer without waiting (since it never finishes).
    print("123")
    IOLoop.current().spawn_callback(consumer)
    print("456")
    await producer()  # wait for the producer to enqueue everything
    await q.join()    # wait until the consumer has processed every item
    # (main() was cut off in the source; the awaits above follow the
    # standard tornado producer/consumer example that this snippet mirrors)

IOLoop.current().run_sync(main)
コード例 #49
class Server:
    """ Server class. """
    __slots__ = [
        'data_path', 'games_path', 'available_maps', 'maps_mtime',
        'notifications', 'games_scheduler', 'allow_registrations', 'max_games',
        'remove_canceled_games', 'users', 'games', 'daide_servers',
        'backup_server', 'backup_games', 'backup_delay_seconds',
        'ping_seconds', 'interruption_handler', 'backend',
        'games_with_dummy_powers', 'dispatched_dummy_powers'
    ]

    # Servers cache.
    __cache__ = {}  # {absolute path of working folder => Server}

    def __new__(cls, server_dir=None, **kwargs):
        #pylint: disable=unused-argument
        server_dir = get_absolute_path(server_dir)
        if server_dir in cls.__cache__:
            server = cls.__cache__[server_dir]
        else:
            server = object.__new__(cls)
        return server

    def __init__(self, server_dir=None, **kwargs):
        """ Initialize the server.
            Server data is stored in folder ``<working directory>/data``.

            :param server_dir: path of folder in (from) which server data will be saved (loaded).
                If None, working directory (where script is executed) will be used.
            :param kwargs: (optional) values for some public configurable server attributes.
                Given values will overwrite values saved on disk.
        """

        # File paths and attributes related to database.
        server_dir = get_absolute_path(server_dir)
        if server_dir in self.__class__.__cache__:
            return
        if not os.path.exists(server_dir) or not os.path.isdir(server_dir):
            raise exceptions.ServerDirException(server_dir)
        self.data_path = os.path.join(server_dir, 'data')
        self.games_path = os.path.join(self.data_path, 'games')

        # Data in memory (not stored on disk).
        self.notifications = Queue()
        self.games_scheduler = Scheduler(1, self._process_game)
        self.backup_server = None
        self.backup_games = {}
        self.interruption_handler = InterruptionHandler(self)
        # Backend objects used to run server. If None, server is not yet started.
        # Initialized when you call Server.start() (see method below).
        self.backend = None  # type: _ServerBackend

        # Database (stored on disk).
        self.allow_registrations = True
        self.max_games = 0
        self.remove_canceled_games = False
        self.backup_delay_seconds = constants.DEFAULT_BACKUP_DELAY_SECONDS
        self.ping_seconds = constants.DEFAULT_PING_SECONDS
        self.users = None  # type: Users  # Users and administrators usernames.
        self.available_maps = {}  # type: Dict[str, List[str]]  # {"map_name" => list("map_power")}
        self.maps_mtime = 0  # Latest maps modification date (used to manage maps cache in server object).

        # Server games loaded on memory (stored on disk).
        # Saved separately (each game in one JSON file).
        # Each game also stores tokens connected (player tokens, observer tokens, omniscient tokens).
        self.games = {}  # type: Dict[str, ServerGame]

        # Dictionary mapping game ID to list of power names.
        self.games_with_dummy_powers = {}  # type: Dict[str, List[str]]

        # Dictionary mapping a game ID present in games_with_dummy_powers, to
        # a couple of associated bot token and time when bot token was associated to this game ID.
        # If there is no bot token associated, couple is (None, None).
        self.dispatched_dummy_powers = {}  # type: dict{str, tuple}

        # DAIDE TCP servers listening to a game's dedicated port.
        self.daide_servers = {}  # {port: daide_server}

        # Load data on memory.
        self._load()

        # If necessary, update server configurable attributes from kwargs.
        self.allow_registrations = bool(
            kwargs.pop(strings.ALLOW_REGISTRATIONS, self.allow_registrations))
        self.max_games = int(kwargs.pop(strings.MAX_GAMES, self.max_games))
        self.remove_canceled_games = bool(
            kwargs.pop(strings.REMOVE_CANCELED_GAMES,
                       self.remove_canceled_games))
        self.backup_delay_seconds = int(
            kwargs.pop(strings.BACKUP_DELAY_SECONDS,
                       self.backup_delay_seconds))
        self.ping_seconds = int(
            kwargs.pop(strings.PING_SECONDS, self.ping_seconds))
        assert not kwargs
        LOGGER.debug('Ping        : %s', self.ping_seconds)
        LOGGER.debug('Backup delay: %s', self.backup_delay_seconds)

        # Add server on servers cache.
        self.__class__.__cache__[server_dir] = self

    @property
    def port(self):
        """ Property: return port where this server currently runs, or None if server is not yet started. """
        return self.backend.port if self.backend else None

    def _load_available_maps(self):
        """ Load a dictionary (self.available_maps) mapping every map name to a dict of map info.
            for all maps available in diplomacy package.
        """
        diplomacy_map_dir = os.path.join(diplomacy.settings.PACKAGE_DIR,
                                         strings.MAPS)
        new_maps_mtime = self.maps_mtime
        for filename in os.listdir(diplomacy_map_dir):
            if filename.endswith('.map'):
                map_filename = os.path.join(diplomacy_map_dir, filename)
                map_mtime = os.path.getmtime(map_filename)
                map_name = filename[:-4]
                if map_name not in self.available_maps or map_mtime > self.maps_mtime:
                    # Either it's a new map file or map file was modified.
                    available_map = Map(map_name)
                    self.available_maps[map_name] = {
                        'powers': list(available_map.powers),
                        'supply_centers': list(available_map.scs),
                        'loc_type': available_map.loc_type.copy(),
                        'loc_abut': available_map.loc_abut.copy(),
                        'aliases': available_map.aliases.copy()
                    }
                    new_maps_mtime = max(new_maps_mtime, map_mtime)
        self.maps_mtime = new_maps_mtime

    def _get_server_data_filename(self):
        """ Return path to server data file name (server.json, making sure that data folder exists.
            Raises an exception if data folder does not exists and cannot be created.
        """
        return os.path.join(ensure_path(self.data_path), 'server.json')

    def _load(self):
        """ Load database from disk. """
        LOGGER.info("Loading database.")
        ensure_path(self.data_path)  # <server dir>/data
        ensure_path(self.games_path)  # <server dir>/data/games
        server_data_filename = self._get_server_data_filename()  # <server dir>/data/server.json
        if os.path.exists(server_data_filename):
            LOGGER.info("Loading server.json.")
            server_info = load_json_from_disk(server_data_filename)
            self.allow_registrations = server_info[strings.ALLOW_REGISTRATIONS]
            self.backup_delay_seconds = server_info[
                strings.BACKUP_DELAY_SECONDS]
            self.ping_seconds = server_info[strings.PING_SECONDS]
            self.max_games = server_info[strings.MAX_GAMES]
            self.remove_canceled_games = server_info[
                strings.REMOVE_CANCELED_GAMES]
            self.users = Users.from_dict(server_info[strings.USERS])
            self.available_maps = server_info[strings.AVAILABLE_MAPS]
            self.maps_mtime = server_info[strings.MAPS_MTIME]
            # games and map are loaded from disk.
        else:
            LOGGER.info("Creating server.json.")
            self.users = Users()
            self.backup_now(force=True)
        # Add default accounts.
        for (username, password) in (('admin', 'password'),
                                     (constants.PRIVATE_BOT_USERNAME,
                                      constants.PRIVATE_BOT_PASSWORD)):
            if not self.users.has_username(username):
                self.users.add_user(username, common.hash_password(password))
        # Set default admin account.
        self.users.add_admin('admin')

        self._load_available_maps()

        LOGGER.info('Server loaded.')

    def _backup_server_data_now(self, force=False):
        """ Save latest backed-up version of server data on disk. This does not save games.

            :param force: if True, force to save current server data,
                even if it was not modified recently.
        """
        if force:
            self.save_data()
        if self.backup_server:
            save_json_on_disk(self._get_server_data_filename(),
                              self.backup_server)
            self.backup_server = None
            LOGGER.info("Saved server.json.")

    def _backup_games_now(self, force=False):
        """ Save latest backed-up versions of loaded games on disk.

            :param force: if True, force to save all games currently loaded in memory
                even if they were not modified recently.
        """
        ensure_path(self.games_path)
        if force:
            for server_game in self.games.values():
                self.save_game(server_game)
        for game_id, game_dict in self.backup_games.items():
            game_path = os.path.join(self.games_path, '%s.json' % game_id)
            save_json_on_disk(game_path, game_dict)
            LOGGER.info('Game data saved: %s', game_id)
        self.backup_games.clear()

    def backup_now(self, force=False):
        """ Save backup of server data and loaded games immediately.

            :param force: if True, force to save server data and all loaded games
                even if there are no recent changes.
        """
        self._backup_server_data_now(force=force)
        self._backup_games_now(force=force)

    @gen.coroutine
    def _process_game(self, server_game):
        """ Process given game and send relevant notifications.

            :param server_game: server game to process
            :return: A boolean indicating if we must stop game.
            :type server_game: ServerGame
        """
        LOGGER.debug('Processing game %s (status %s).', server_game.game_id,
                     server_game.status)
        previous_phase_data, current_phase_data, kicked_powers = server_game.process()
        self.save_game(server_game)

        if previous_phase_data is None and kicked_powers is None:
            # Game must be unscheduled immediately.
            return True

        notifier = Notifier(self)

        if kicked_powers:
            # Game was not processed because of kicked powers.
            # We notify those kicked powers and game must be unscheduled immediately.
            kicked_addresses = [(power_name, token)
                                for (power_name,
                                     tokens) in kicked_powers.items()
                                for token in tokens]
            # Notify kicked players.
            notifier.notify_game_addresses(
                server_game.game_id,
                kicked_addresses,
                notifications.PowersControllers,
                powers=server_game.get_controllers(),
                timestamps=server_game.get_controllers_timestamps())
            return True

        # Game was processed normally.
        # Send game updates to powers, observers and omniscient observers.
        yield notifier.notify_game_processed(server_game, previous_phase_data,
                                             current_phase_data)

        # If game is completed, we must close associated DAIDE port.
        if server_game.is_game_done:
            self.stop_daide_server(server_game.game_id)

        # Game must be stopped if not active.
        return not server_game.is_game_active

    @gen.coroutine
    def _task_save_database(self):
        """ IO loop callable: save database and loaded games periodically.
            Data to save are checked every BACKUP_DELAY_SECONDS seconds.
        """
        LOGGER.info('Waiting for save events.')
        while True:
            yield gen.sleep(self.backup_delay_seconds)
            self.backup_now()

    @gen.coroutine
    def _task_send_notifications(self):
        """ IO loop callback: consume notifications and send it. """
        LOGGER.info('Waiting for notifications to send.')
        while True:
            connection_handler, notification = yield self.notifications.get()
            try:
                yield connection_handler.write_message(notification)
            except WebSocketClosedError:
                LOGGER.error(
                    'Websocket was closed while sending a notification.')
            except StreamClosedError:
                LOGGER.error('Stream was closed while sending a notification.')
            finally:
                self.notifications.task_done()

    def set_tasks(self, io_loop: IOLoop):
        """ Set server callbacks on given IO loop.
            Must be called once per server before starting IO loop.
        """
        io_loop.add_callback(self._task_save_database)
        io_loop.add_callback(self._task_send_notifications)
        # Both of these coroutines are used to manage games.
        io_loop.add_callback(self.games_scheduler.process_tasks)
        io_loop.add_callback(self.games_scheduler.schedule)
        # Set callback on KeyboardInterrupt.
        signal.signal(signal.SIGINT, self.interruption_handler.handler)
        atexit.register(self.backup_now)

    def start(self, port=None, io_loop=None):
        """ Start server if not yet started. Raise an exception if server is already started.

            :param port: (optional) port where server must run. If not provided,
                try to start on a random selected port. Use property `port` to get current server port.
            :param io_loop: (optional) tornado IO loop where server must run. If not provided, get
                default IO loop instance (tornado.ioloop.IOLoop.instance()).
        """
        if self.backend is not None:
            raise exceptions.DiplomacyException(
                'Server is already running on port %s.' % self.backend.port)
        if port is None:
            port = 8432
        if io_loop is None:
            io_loop = tornado.ioloop.IOLoop.instance()
        handlers = [
            tornado.web.url(r"/", ConnectionHandler, {'server': self}),
        ]
        settings = {
            'cookie_secret': common.generate_token(),
            'xsrf_cookies': True,
            'websocket_ping_interval': self.ping_seconds,
            'websocket_ping_timeout': 2 * self.ping_seconds,
            'websocket_max_message_size': 64 * 1024 * 1024
        }
        self.backend = _ServerBackend()
        self.backend.application = tornado.web.Application(
            handlers, **settings)
        self.backend.http_server = self.backend.application.listen(port)
        self.backend.io_loop = io_loop
        self.backend.port = port
        self.set_tasks(io_loop)
        LOGGER.info('Running on port %d', self.backend.port)
        if not io_loop.asyncio_loop.is_running():
            io_loop.start()

    def get_game_indices(self):
        """ Iterate over all game indices in server database.
            Convenient method to iterate over all server games (by calling load_game() on each game index).
        """
        for game_id in self.games:
            yield game_id
        if os.path.isdir(self.games_path):
            for filename in os.listdir(self.games_path):
                if filename.endswith('.json'):
                    game_id = filename[:-5]
                    if game_id not in self.games:
                        yield game_id

    def count_server_games(self):
        """ Return number of server games in server database. """
        count = 0
        if os.path.isdir(self.games_path):
            for filename in os.listdir(self.games_path):
                if filename.endswith('.json'):
                    count += 1
        return count

    def save_data(self):
        """ Update on-memory backup of server data. """
        self.backup_server = {
            strings.ALLOW_REGISTRATIONS: self.allow_registrations,
            strings.BACKUP_DELAY_SECONDS: self.backup_delay_seconds,
            strings.PING_SECONDS: self.ping_seconds,
            strings.MAX_GAMES: self.max_games,
            strings.REMOVE_CANCELED_GAMES: self.remove_canceled_games,
            strings.USERS: self.users.to_dict(),
            strings.AVAILABLE_MAPS: self.available_maps,
            strings.MAPS_MTIME: self.maps_mtime,
        }

    def save_game(self, server_game):
        """ Update on-memory version of given server game.

            :param server_game: server game
            :type server_game: ServerGame
        """
        self.backup_games[server_game.game_id] = server_game.to_dict()
        # Check dummy powers for a game every time we have to save it.
        self.register_dummy_power_names(server_game)

    def register_dummy_power_names(self, server_game):
        """ Update internal registry of dummy power names waiting for orders for given server games.

            :param server_game: server game to check
            :type server_game: ServerGame
        """
        if server_game.map.root_map != 'standard':
            # Bot does not currently support other maps.
            return
        dummy_power_names = []
        if server_game.is_game_active or server_game.is_game_paused:
            dummy_power_names = server_game.get_dummy_unordered_power_names()
            if dummy_power_names:
                # Update registry of dummy powers.
                self.games_with_dummy_powers[
                    server_game.game_id] = dummy_power_names
                # Every time we update the registry of dummy powers,
                # we also update the bot time in the registry of dummy powers associated to bot tokens.
                bot_token, _ = self.dispatched_dummy_powers.get(
                    server_game.game_id, (None, None))
                self.dispatched_dummy_powers[server_game.game_id] = (
                    bot_token, common.timestamp_microseconds())
        if not dummy_power_names:
            # No waiting dummy powers for this game, or game is not playable (canceled, completed, or forming).
            self.games_with_dummy_powers.pop(server_game.game_id, None)
            self.dispatched_dummy_powers.pop(server_game.game_id, None)

    def get_dummy_waiting_power_names(self, buffer_size, bot_token):
        """ Return names of dummy powers waiting for orders for current loaded games.
            This query is allowed only for bot tokens.

            :param buffer_size: maximum number of powers queried.
            :param bot_token: bot token
            :return: a dictionary mapping each game ID to a list of power names.
        """
        if self.users.get_name(bot_token) != constants.PRIVATE_BOT_USERNAME:
            raise exceptions.ResponseException('Invalid bot token %s' %
                                               bot_token)
        selected_size = 0
        selected_games = {}
        for game_id in sorted(list(self.games_with_dummy_powers.keys())):
            registered_token, registered_time = self.dispatched_dummy_powers[
                game_id]
            if registered_token is not None:
                time_elapsed_seconds = (common.timestamp_microseconds() -
                                        registered_time) / 1000000
                if time_elapsed_seconds > constants.PRIVATE_BOT_TIMEOUT_SECONDS or registered_token == bot_token:
                    # This game still has dummy powers, but either the time allocated to the previous bot token
                    # is over, or the bot dedicated to this game is asking for this game's current dummy powers.
                    # Forget the previous bot token.
                    registered_token = None
            if registered_token is None:
                # This game is not associated to any bot token.
                # Let current bot token handle it if buffer size is not reached.
                dummy_power_names = self.games_with_dummy_powers[game_id]
                nb_powers = len(dummy_power_names)
                if selected_size + nb_powers > buffer_size:
                    # Buffer size would be exceeded. We stop collecting games now.
                    break
                # Otherwise we collect this game.
                selected_games[game_id] = dummy_power_names
                selected_size += nb_powers
                self.dispatched_dummy_powers[game_id] = (
                    bot_token, common.timestamp_microseconds())
        return selected_games

    def has_game_id(self, game_id):
        """ Return True if server database contains such game ID. """
        if game_id in self.games:
            return True
        expected_game_path = os.path.join(self.games_path, '%s.json' % game_id)
        return os.path.exists(expected_game_path) and os.path.isfile(
            expected_game_path)

    def load_game(self, game_id):
        """ Return a game matching given game ID from server database.
            Raise an exception if such a game does not exist.

            If such game is already stored in server object, return it.

            Else, load it from disk but **do not store it in the server object**.

            To load and immediately store a game object in server object, please use method get_game().

            Method load_game() is convenient when you want to iterate over all games in server database
            without taking memory space.

            :param game_id: ID of game to load.
            :return: a ServerGame object
            :rtype: ServerGame
        """
        if game_id in self.games:
            return self.games[game_id]
        game_filename = os.path.join(ensure_path(self.games_path),
                                     '%s.json' % game_id)
        if not os.path.isfile(game_filename):
            raise exceptions.GameIdException()
        try:
            server_game = ServerGame.from_dict(
                load_json_from_disk(game_filename))  # type: ServerGame
            server_game.server = self
            server_game.filter_usernames(self.users.has_username)
            server_game.filter_tokens(self.users.has_token)
            return server_game
        except ValueError as exc:
            # Error occurred while parsing JSON file: bad JSON file.
            try:
                os.remove(game_filename)
            finally:
                # This should be an internal server error.
                raise exc

    def add_new_game(self, server_game):
        """ Add a new game data on server in memory and perform any addition processing.
            This does not save the game on disk.

            :type server_game: ServerGame
        """
        # Register game on memory.
        self.games[server_game.game_id] = server_game
        # Start DAIDE server for this game.
        self.start_new_daide_server(server_game.game_id)

    def get_game(self, game_id):
        """ Return game saved on server matching given game ID.
            Raise an exception if game ID not found.
            Return game if already loaded in memory; else load it from disk, store it,
            perform any loading/additional processing and return it.

            :param game_id: ID of game to load.
            :return: a ServerGame object.
            :rtype: ServerGame
        """
        server_game = self.load_game(game_id)
        if game_id not in self.games:
            LOGGER.debug('Game loaded: %s', game_id)
            # Check dummy powers for this game as soon as it's loaded from disk.
            self.register_dummy_power_names(server_game)
            # Register game on memory.
            self.games[server_game.game_id] = server_game
            # Start DAIDE server for this game.
            self.start_new_daide_server(server_game.game_id)
            # We have just loaded game from disk. Start it if necessary.
            if not server_game.start_master and server_game.has_expected_controls_count():
                # We may have to start game.
                if server_game.does_not_wait():
                    # We must process game.
                    server_game.process()
                    self.save_game(server_game)
                # Game must be scheduled only if active.
                if server_game.is_game_active:
                    LOGGER.debug('Game loaded and scheduled: %s',
                                 server_game.game_id)
                    self.schedule_game(server_game)
        return server_game

    def delete_game(self, server_game):
        """ Delete given game from server (both from memory and disk)
            and perform any post-deletion processing.

            :param server_game: game to delete
            :type server_game: ServerGame
        """
        if not (server_game.is_game_canceled or server_game.is_game_completed):
            server_game.set_status(strings.CANCELED)
        game_filename = os.path.join(self.games_path,
                                     '%s.json' % server_game.game_id)
        backup_game_filename = get_backup_filename(game_filename)
        if os.path.isfile(game_filename):
            os.remove(game_filename)
        if os.path.isfile(backup_game_filename):
            os.remove(backup_game_filename)
        self.games.pop(server_game.game_id, None)
        self.backup_games.pop(server_game.game_id, None)
        self.games_with_dummy_powers.pop(server_game.game_id, None)
        self.dispatched_dummy_powers.pop(server_game.game_id, None)
        # Stop DAIDE server associated to this game.
        self.stop_daide_server(server_game.game_id)

    @gen.coroutine
    def schedule_game(self, server_game):
        """ Add a game to scheduler only if game has a deadline and is not already scheduled.
            To add games without deadline, use force_game_processing().

            :param server_game: game
            :type server_game: ServerGame
        """
        if not (yield self.games_scheduler.has_data(server_game)
                ) and server_game.deadline:
            yield self.games_scheduler.add_data(server_game,
                                                server_game.deadline)

    @gen.coroutine
    def unschedule_game(self, server_game):
        """ Remove a game from scheduler.

            :param server_game: game
            :type server_game: ServerGame
        """
        if (yield self.games_scheduler.has_data(server_game)):
            yield self.games_scheduler.remove_data(server_game)

    @gen.coroutine
    def force_game_processing(self, server_game):
        """ Add a game to scheduler to be processed as soon as possible.
            Use this method instead of schedule_game() to explicitly add games with null deadline.

            :param server_game: game
            :type server_game: ServerGame
        """
        yield self.games_scheduler.no_wait(server_game, server_game.deadline,
                                           lambda g: g.does_not_wait())

    def start_game(self, server_game):
        """ Start given server game.

            :param server_game: server game
            :type server_game: ServerGame
        """
        server_game.set_status(strings.ACTIVE)
        self.schedule_game(server_game)
        Notifier(self).notify_game_status(server_game)

    def stop_game_if_needed(self, server_game):
        """ Stop game if it has not required number of controlled powers.
            Notify game if status changed.

            :param server_game: game to check
            :type server_game: ServerGame
        """
        if server_game.is_game_active and (
                server_game.count_controlled_powers() <
                server_game.get_expected_controls_count()):
            server_game.set_status(strings.FORMING)
            self.unschedule_game(server_game)
            Notifier(self).notify_game_status(server_game)

    def user_is_master(self, username, server_game):
        """ Return True if given username is a game master for given game data.

            :param username: username
            :param server_game: game data
            :return: a boolean
            :type server_game: ServerGame
            :rtype: bool
        """
        return self.users.has_admin(username) or server_game.is_moderator(
            username)

    def user_is_omniscient(self, username, server_game):
        """ Return True if given username is omniscient for given game data.

            :param username: username
            :param server_game: game data
            :return: a boolean
            :type server_game: ServerGame
            :rtype: bool
        """
        return (self.users.has_admin(username)
                or server_game.is_moderator(username)
                or server_game.is_omniscient(username))

    def token_is_master(self, token, server_game):
        """ Return True if given token is a master token for given game data.

            :param token: token
            :param server_game: game data
            :return: a boolean
            :type server_game: ServerGame
            :rtype: bool
        """
        return (self.users.has_token(token) and self.user_is_master(
            self.users.get_name(token), server_game))

    def token_is_omniscient(self, token, server_game):
        """ Return True if given token is omniscient for given game data.

            :param token: token
            :param server_game: game data
            :return: a boolean
            :type server_game: ServerGame
            :rtype: bool
        """
        return (self.users.has_token(token) and self.user_is_omniscient(
            self.users.get_name(token), server_game))

    def create_game_id(self):
        """ Create and return a game ID not already used by a game in server database. """
        game_id = base64.b64encode(os.urandom(12), b'-_').decode('utf-8')
        while self.has_game_id(game_id):
            game_id = base64.b64encode(os.urandom(12), b'-_').decode('utf-8')
        return game_id

    def remove_token(self, token):
        """ Disconnect given token from related user and loaded games. Stop related games if needed,
            e.g. if a game does not have anymore expected number of controlled powers.
        """
        self.users.disconnect_token(token)
        for server_game in self.games.values():  # type: ServerGame
            server_game.remove_token(token)
            self.stop_game_if_needed(server_game)
            self.save_game(server_game)
        self.save_data()

    def assert_token(self, token, connection_handler):
        """ Check if given token is associated to an user, check if token is still valid,
            and link token to given connection handler. If any step failed, raise an exception.

            :param token: token to check
            :param connection_handler: connection handler associated to this token
        """
        if not self.users.has_token(token):
            raise exceptions.TokenException()
        if self.users.token_is_alive(token):
            self.users.relaunch_token(token)
            self.save_data()
        else:
            # Logout on server side and raise exception (invalid token).
            LOGGER.error('Token too old %s', token)
            self.remove_token(token)
            raise exceptions.TokenException()
        self.users.attach_connection_handler(token, connection_handler)

    def assert_admin_token(self, token):
        """ Check if given token is an admin token. Raise an exception on error. """
        if not self.users.token_is_admin(token):
            raise exceptions.AdminTokenException()

    def assert_master_token(self, token, server_game):
        """ Check if given token is a master token for given game data. Raise an exception on error.

            :param token: token
            :param server_game: game data
            :type server_game: ServerGame
        """
        if not self.token_is_master(token, server_game):
            raise exceptions.GameMasterTokenException()

    def cannot_create_more_games(self):
        """ Return True if server can not accept new games. """
        return self.max_games and self.count_server_games() >= self.max_games

    def get_map(self, map_name):
        """ Return map power names for given map name. """
        return self.available_maps.get(map_name, None)

    def start_new_daide_server(self, game_id, port=None):
        """ Start a new DAIDE TCP server to handle DAIDE clients connections

            :param game_id: game id to pass to the DAIDE server
            :param port: the port to use. If None, an available random port will be used
        """
        if port in self.daide_servers:
            raise RuntimeError('Port already in use by a DAIDE server')

        for server in self.daide_servers.values():
            if server.game_id == game_id:
                return None

        while port is None or is_port_opened(port):
            port = randint(8000, 8999)

        # Create DAIDE TCP server
        daide_server = DaideServer(self, game_id)
        daide_server.listen(port)
        self.daide_servers[port] = daide_server
        LOGGER.info('DAIDE server running for game %s on port %d', game_id,
                    port)
        return port

    def stop_daide_server(self, game_id):
        """ Stop one or all DAIDE TCP server

            :param game_id: game id of the DAIDE server. If None, all servers will be stopped
            :type game_id: str
        """
        for port in list(self.daide_servers.keys()):
            server = self.daide_servers[port]
            if game_id is None or server.game_id == game_id:
                server.stop()
                del self.daide_servers[port]

    def get_daide_port(self, game_id):
        """ Get the DAIDE port opened for a specific game_id

            :param game_id: game id of the DAIDE server.
        """
        for port, server in self.daide_servers.items():
            if server.game_id == game_id:
                return port
        return None
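
# Hedged usage sketch for the Server above, using only what this excerpt
# shows: the constructor is cached per working directory, start() runs
# tornado's IO loop until interrupted, and the port defaults to 8432 when
# none is given.
if __name__ == '__main__':
    server = Server(server_dir=None)  # None => use the working directory
    server.start()                    # listens on port 8432 by default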
コード例 #50
ファイル: base.py プロジェクト: andrey-yantsen/tobot
class Base:
    SETTINGS_PER_BOT = 1
    SETTINGS_PER_USER = 2
    SETTINGS_TYPE = SETTINGS_PER_BOT

    def __init__(self, token, stages_builder: callable, **kwargs):
        self.token = token
        self.settings = kwargs.pop('settings', {})

        self.ignore_403_in_handlers = kwargs.pop('ignore_403_in_handlers',
                                                 False)
        for key, value in kwargs.items():
            self.__dict__[key] = value

        self.api = Api(token, self.process_update)
        self.user_settings = {}
        self.commands = {}
        self.raw_commands_tree = {}
        self.cancellation_handler = None
        self.unknown_command_handler = None
        self.updates_queue = Queue(
            kwargs.get('updates_queue_handlers', 4) * 10)
        self._init_handlers()
        self._stages = stages_builder(bot_id=self.bot_id)
        self._finished = Event()
        self._supported_languages = tuple([])

    def _init_handlers(self):
        raise NotImplementedError()

    def _add_handler(self,
                     handler: callable,
                     name: pgettext = None,
                     previous_handler: callable = None,
                     is_final=True):
        if handler not in self.commands:
            self.commands[handler] = Command(self, handler, name)

        if previous_handler and previous_handler not in self.commands:
            raise BotError('Previous command is unknown')

        previous_handler_name = previous_handler.__name__ if previous_handler else 'none'

        if previous_handler_name not in self.raw_commands_tree:
            self.raw_commands_tree[previous_handler_name] = []
        else:
            for h, _ in self.raw_commands_tree[previous_handler_name]:
                if h.handler == handler and handler != self.cancellation_handler:
                    raise BotError('Command already registered')
                elif h.handler == handler:
                    return

        self.raw_commands_tree[previous_handler_name].append(
            (self.commands[handler], is_final))

        if not is_final and self.cancellation_handler:
            self._add_handler(self.cancellation_handler,
                              previous_handler=handler,
                              is_final=True)

    def _load_user_settings_per_user(self):
        return {}

    def _update_settings_for_bot(self, settings):
        pass

    def _update_settings_for_user(self, user_id, settings):
        pass

    @coroutine
    def update_settings(self, user_id, **kwargs):
        logging.info('[bot#%s] Updating settings to %s by user#%s',
                     self.bot_id, kwargs, user_id)
        if self.SETTINGS_TYPE == self.SETTINGS_PER_BOT:
            self.settings.update(kwargs)
            yield maybe_future(self._update_settings_for_bot(self.settings))
        else:
            if user_id not in self.user_settings:
                self.user_settings[user_id] = kwargs
            else:
                self.user_settings[user_id].update(kwargs)

            yield maybe_future(
                self._update_settings_for_user(user_id,
                                               self.user_settings[user_id]))

    def get_settings(self, user_id):
        if self.SETTINGS_TYPE == self.SETTINGS_PER_BOT:
            return deepcopy(self.settings)
        else:
            return deepcopy(self.user_settings.get(user_id, {}))

    @coroutine
    def start(self):
        logging.debug('[bot#%s] Starting', self.bot_id)
        self._finished.clear()
        self.user_settings = yield maybe_future(
            self._load_user_settings_per_user())
        handlers_f = [
            self._update_processor()
            for _ in range(self.settings.get('updates_queue_handlers', 4))
        ]
        yield maybe_future(self._stages.restore())
        try:
            yield self.api.wait_commands()
        finally:
            self.stop()
            yield handlers_f

    def stop(self):
        assert not self._finished.is_set()
        logging.debug('[bot#%s] Terminating', self.bot_id)
        self._finished.set()
        if self.api.is_alive:
            self.api.stop()

    @property
    def is_alive(self):
        return not self._finished.is_set()

    @coroutine
    def process_update(self, update):
        yield self.updates_queue.put(update)

    @staticmethod
    def get_stage_key(update):
        if 'message' in update:
            chat_id = update['message']['chat']['id']
            user_id = update['message']['from']['id']
        elif 'callback_query' in update:
            if 'message' in update['callback_query']:
                chat_id = update['callback_query']['message']['chat']['id']
            else:
                chat_id = update['callback_query']['from']['id']
            user_id = update['callback_query']['from']['id']
        elif 'channel_post' in update:
            chat_id = update['channel_post']['chat']['id']
            user_id = update['channel_post']['chat']['id']
        elif 'edited_channel_post' in update:
            chat_id = update['edited_channel_post']['chat']['id']
            user_id = update['edited_channel_post']['chat']['id']
        else:
            raise BotError('Unable to get stage_key for this type of update')

        return '%s-%s' % (user_id, chat_id)

    @coroutine
    def _update_processor(self):
        while not self._finished.is_set():
            try:
                received_update = yield self.updates_queue.get(
                    timedelta(seconds=3))
            except:
                continue

            del received_update['update_id']

            try:
                stage_key = self.get_stage_key(received_update)
                current_stage = self._stages[stage_key]
                if current_stage:
                    stage_data = current_stage[1]
                    received_update.update(current_stage[1])
                    commands_tree = self.raw_commands_tree[current_stage[0]]
                else:
                    stage_data = {}
                    commands_tree = self.raw_commands_tree['none']

                processing_result = False
                for command_in_tree in commands_tree:
                    try:
                        processing_result = yield command_in_tree[0](
                            **received_update)
                    except ApiError as e:
                        if not self.ignore_403_in_handlers or str(
                                e.code) != '403':
                            raise
                        else:
                            logging.exception(
                                'Got exception in message handler')
                    if processing_result is not False:
                        if not command_in_tree[1] and processing_result is not None:
                            if processing_result is True:
                                processing_result = {}
                            stage_data.update(processing_result)
                            self._stages[stage_key] = (command_in_tree[0].handler,
                                                       stage_data)
                        elif processing_result is not None:
                            del self._stages[stage_key]
                        break

                if processing_result is False:
                    logging.debug('Handler not found: %s',
                                  dumps(received_update, indent=2))
                    if self.unknown_command_handler:
                        try:
                            yield maybe_future(
                                self.unknown_command_handler(
                                    self, **received_update))
                        except ApiError as e:
                            if not self.ignore_403_in_handlers or str(
                                    e.code) != '403':
                                raise
                            else:
                                logging.exception(
                                    'Got exception in message handler')
            except:
                logging.exception(
                    '[bot#%s] Got error while processing message %s',
                    self.bot_id, dumps(received_update, indent=2))

            self.updates_queue.task_done()

    def __getattr__(self, name):
        def outer_wrapper(f):
            @wraps(f)
            def wrapper(*args, **kwargs):
                l = locale.get('en_US')
                if self.SETTINGS_TYPE == self.SETTINGS_PER_BOT:
                    l = locale.get(self.settings.get('locale', 'en_US'))
                elif self.SETTINGS_TYPE == self.SETTINGS_PER_USER:
                    chat_id = None
                    if 'reply_to_message' in kwargs:
                        if 'chat' in kwargs['reply_to_message']:
                            chat_id = kwargs['reply_to_message']['chat']['id']
                        elif 'from' in kwargs['reply_to_message']:
                            chat_id = kwargs['reply_to_message']['from']['id']
                    elif 'chat_id' in kwargs:
                        chat_id = kwargs['chat_id']

                    if chat_id in self.user_settings:
                        l = locale.get(self.user_settings[chat_id].get(
                            'locale', 'en_US'))

                return f(*set_locale_recursive(args, l),
                         **set_locale_recursive(kwargs, l))

            return wrapper

        if hasattr(self.api, name):
            attr = getattr(self.api, name)
            if isinstance(attr, type(self.stop)):
                return outer_wrapper(attr)
            else:
                return attr
        else:
            raise AttributeError("'%s' object has no attribute '%s'" %
                                 (self.__class__.__name__, name))
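
To make the command tree above concrete, here is a hypothetical minimal subclass showing how _init_handlers and _add_handler fit together. The EchoBot name, the handler bodies, and the assumption that the wrapped Api object exposes a send_message coroutine are illustrative, not taken from the project:

class EchoBot(Base):
    def _init_handlers(self):
        # Reachable from the 'none' stage; is_final=False keeps a stage
        # alive so the follow-up handler below can catch the next message.
        self._add_handler(self.start, is_final=False)
        # Reachable only while the stage left by self.start is active.
        self._add_handler(self.echo, previous_handler=self.start)

    @coroutine
    def start(self, message, **kwargs):
        yield self.send_message('Say something!',  # assumed Api method
                                chat_id=message['chat']['id'])
        return True  # non-final handler + truthy result: stage is stored

    @coroutine
    def echo(self, message, **kwargs):
        yield self.send_message(message['text'],
                                chat_id=message['chat']['id'])
        return True  # final handler: stage is cleared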
Code example #51
class DebugpyMessageQueue:

    HEADER = 'Content-Length: '
    HEADER_LENGTH = 16
    SEPARATOR = '\r\n\r\n'
    SEPARATOR_LENGTH = 4

    def __init__(self, event_callback, log):
        self.tcp_buffer = ''
        self._reset_tcp_pos()
        self.event_callback = event_callback
        self.message_queue = Queue()
        self.log = log

    def _reset_tcp_pos(self):
        self.header_pos = -1
        self.separator_pos = -1
        self.message_size = 0
        self.message_pos = -1

    def _put_message(self, raw_msg):
        self.log.debug('QUEUE - _put_message:')
        msg = jsonapi.loads(raw_msg)
        if msg['type'] == 'event':
            self.log.debug('QUEUE - received event:')
            self.log.debug(msg)
            self.event_callback(msg)
        else:
            self.log.debug('QUEUE - put message:')
            self.log.debug(msg)
            self.message_queue.put_nowait(msg)

    def put_tcp_frame(self, frame):
        self.tcp_buffer += frame

        self.log.debug('QUEUE - received frame')
        while True:
            # Finds header
            if self.header_pos == -1:
                self.header_pos = self.tcp_buffer.find(
                    DebugpyMessageQueue.HEADER)
            if self.header_pos == -1:
                return

            self.log.debug('QUEUE - found header at pos %i', self.header_pos)

            # Finds separator
            if self.separator_pos == -1:
                hint = self.header_pos + DebugpyMessageQueue.HEADER_LENGTH
                self.separator_pos = self.tcp_buffer.find(
                    DebugpyMessageQueue.SEPARATOR, hint)
            if self.separator_pos == -1:
                return

            self.log.debug('QUEUE - found separator at pos %i',
                           self.separator_pos)

            if self.message_pos == -1:
                size_pos = self.header_pos + DebugpyMessageQueue.HEADER_LENGTH
                self.message_pos = self.separator_pos + DebugpyMessageQueue.SEPARATOR_LENGTH
                self.message_size = int(
                    self.tcp_buffer[size_pos:self.separator_pos])

            self.log.debug('QUEUE - found message at pos %i', self.message_pos)
            self.log.debug('QUEUE - message size is %i', self.message_size)

            if len(self.tcp_buffer) - self.message_pos < self.message_size:
                return

            self._put_message(
                self.tcp_buffer[self.message_pos:self.message_pos +
                                self.message_size])
            if len(self.tcp_buffer) - self.message_pos == self.message_size:
                self.log.debug('QUEUE - resetting tcp_buffer')
                self.tcp_buffer = ''
                self._reset_tcp_pos()
                return
            else:
                self.tcp_buffer = self.tcp_buffer[self.message_pos +
                                                  self.message_size:]
                self.log.debug('QUEUE - slicing tcp_buffer: %s',
                               self.tcp_buffer)
                self._reset_tcp_pos()

    async def get_message(self):
        return await self.message_queue.get()
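
A short usage sketch for the framing logic above: put_tcp_frame() tolerates arbitrary chunking and buffers input until a full Content-Length-framed message is available. The event callback and logger passed to the constructor here are placeholders:

import json
import logging

queue = DebugpyMessageQueue(event_callback=print,
                            log=logging.getLogger('dap'))

body = json.dumps({'type': 'response', 'seq': 1})
frame = 'Content-Length: %d\r\n\r\n%s' % (len(body), body)

queue.put_tcp_frame(frame[:10])  # partial header: nothing queued yet
queue.put_tcp_frame(frame[10:])  # remainder arrives: message is queued
# await queue.get_message()      # -> {'type': 'response', 'seq': 1}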
Code example #52
File: pubnub_tornado.py  Project: QSDZvonimir/python
class TornadoSubscriptionManager(SubscriptionManager):
    def __init__(self, pubnub_instance):

        subscription_manager = self

        self._message_queue = Queue()
        self._consumer_event = Event()
        self._cancellation_event = Event()
        self._subscription_lock = Semaphore(1)
        # self._current_request_key_object = None
        self._heartbeat_periodic_callback = None
        self._reconnection_manager = TornadoReconnectionManager(
            pubnub_instance)

        super(TornadoSubscriptionManager, self).__init__(pubnub_instance)
        self._start_worker()

        class TornadoReconnectionCallback(ReconnectionCallback):
            def on_reconnect(self):
                subscription_manager.reconnect()

                pn_status = PNStatus()
                pn_status.category = PNStatusCategory.PNReconnectedCategory
                pn_status.error = False

                subscription_manager._subscription_status_announced = True
                subscription_manager._listener_manager.announce_status(
                    pn_status)

        self._reconnection_listener = TornadoReconnectionCallback()
        self._reconnection_manager.set_reconnection_listener(
            self._reconnection_listener)

    def _set_consumer_event(self):
        self._consumer_event.set()

    def _message_queue_put(self, message):
        self._message_queue.put(message)

    def _start_worker(self):
        self._consumer = TornadoSubscribeMessageWorker(self._pubnub,
                                                       self._listener_manager,
                                                       self._message_queue,
                                                       self._consumer_event)
        run = stack_context.wrap(self._consumer.run)
        self._pubnub.ioloop.spawn_callback(run)

    def reconnect(self):
        self._should_stop = False
        self._pubnub.ioloop.spawn_callback(self._start_subscribe_loop)
        # self._register_heartbeat_timer()

    def disconnect(self):
        self._should_stop = True
        self._stop_heartbeat_timer()
        self._stop_subscribe_loop()

    @tornado.gen.coroutine
    def _start_subscribe_loop(self):
        self._stop_subscribe_loop()

        yield self._subscription_lock.acquire()

        self._cancellation_event.clear()

        combined_channels = self._subscription_state.prepare_channel_list(True)
        combined_groups = self._subscription_state.prepare_channel_group_list(
            True)

        if len(combined_channels) == 0 and len(combined_groups) == 0:
            return

        envelope_future = Subscribe(self._pubnub) \
            .channels(combined_channels).channel_groups(combined_groups) \
            .timetoken(self._timetoken).region(self._region) \
            .filter_expression(self._pubnub.config.filter_expression) \
            .cancellation_event(self._cancellation_event) \
            .future()

        canceller_future = self._cancellation_event.wait()

        wi = tornado.gen.WaitIterator(envelope_future, canceller_future)

        # iterates twice: once for the result, once for cancellation
        while not wi.done():
            try:
                result = yield wi.next()
            except Exception as e:
                # TODO: verify the error will not be eaten
                logger.error(e)
                raise
            else:
                if wi.current_future == envelope_future:
                    e = result
                elif wi.current_future == canceller_future:
                    return
                else:
                    raise Exception("Unexpected future resolved: %s" %
                                    str(wi.current_future))

                if e.is_error():
                    # A 599 status doesn't help here - tornado uses this
                    # status code for a wide range of errors, e.g.:
                    # HTTP Server Error (599): [Errno -2] Name or service not known
                    if e.status is not None and e.status.category == PNStatusCategory.PNTimeoutCategory:
                        self._pubnub.ioloop.spawn_callback(
                            self._start_subscribe_loop)
                        return

                    logger.error("Exception in subscribe loop: %s" % str(e))

                    if e.status is not None and e.status.category == PNStatusCategory.PNAccessDeniedCategory:
                        e.status.operation = PNOperationType.PNUnsubscribeOperation

                    self._listener_manager.announce_status(e.status)

                    self._reconnection_manager.start_polling()
                    self.disconnect()
                    return
                else:
                    self._handle_endpoint_call(e.result, e.status)

                    self._pubnub.ioloop.spawn_callback(
                        self._start_subscribe_loop)

            finally:
                self._cancellation_event.set()
                yield tornado.gen.moment
                self._subscription_lock.release()
                self._cancellation_event.clear()
                break

    def _stop_subscribe_loop(self):
        if self._cancellation_event is not None and \
                not self._cancellation_event.is_set():
            self._cancellation_event.set()

    def _stop_heartbeat_timer(self):
        if self._heartbeat_periodic_callback is not None:
            self._heartbeat_periodic_callback.stop()

    def _register_heartbeat_timer(self):
        super(TornadoSubscriptionManager, self)._register_heartbeat_timer()
        self._heartbeat_periodic_callback = PeriodicCallback(
            stack_context.wrap(self._perform_heartbeat_loop),
            self._pubnub.config.heartbeat_interval *
            TornadoSubscriptionManager.HEARTBEAT_INTERVAL_MULTIPLIER,
            self._pubnub.ioloop)
        self._heartbeat_periodic_callback.start()

    @tornado.gen.coroutine
    def _perform_heartbeat_loop(self):
        if self._heartbeat_call is not None:
            # TODO: cancel call
            pass

        cancellation_event = Event()
        state_payload = self._subscription_state.state_payload()
        presence_channels = self._subscription_state.prepare_channel_list(
            False)
        presence_groups = self._subscription_state.prepare_channel_group_list(
            False)

        if len(presence_channels) == 0 and len(presence_groups) == 0:
            return

        try:
            envelope = yield self._pubnub.heartbeat() \
                .channels(presence_channels) \
                .channel_groups(presence_groups) \
                .state(state_payload) \
                .cancellation_event(cancellation_event) \
                .future()

            heartbeat_verbosity = self._pubnub.config.heartbeat_notification_options
            if envelope.status.is_error:
                if heartbeat_verbosity == PNHeartbeatNotificationOptions.ALL or \
                        heartbeat_verbosity == PNHeartbeatNotificationOptions.FAILURES:
                    self._listener_manager.announce_status(envelope.status)
            else:
                if heartbeat_verbosity == PNHeartbeatNotificationOptions.ALL:
                    self._listener_manager.announce_status(envelope.status)

        except PubNubTornadoException:
            pass
            # TODO: check correctness
            # if e.status is not None and e.status.category == PNStatusCategory.PNTimeoutCategory:
            #     self._start_subscribe_loop()
            # else:
            #     self._listener_manager.announce_status(e.status)
        except Exception as e:
            print(e)
        finally:
            cancellation_event.set()

    @tornado.gen.coroutine
    def _send_leave(self, unsubscribe_operation):
        envelope = yield Leave(self._pubnub) \
            .channels(unsubscribe_operation.channels) \
            .channel_groups(unsubscribe_operation.channel_groups).future()
        self._listener_manager.announce_status(envelope.status)
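
The subscribe loop above races the long-poll future against a cancellation event through tornado.gen.WaitIterator. Stripped of the PubNub specifics, the underlying pattern looks roughly like this:

import tornado.gen

@tornado.gen.coroutine
def race(work_future, cancellation_event):
    """ Resolve with the work result, or None if cancelled first (sketch). """
    wait_iterator = tornado.gen.WaitIterator(work_future,
                                             cancellation_event.wait())
    while not wait_iterator.done():
        result = yield wait_iterator.next()
        if wait_iterator.current_future is work_future:
            raise tornado.gen.Return(result)
        return  # the cancellation event fired first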
Code example #53
    def __init__(self, server_dir=None, **kwargs):
        """ Initialize the server.
            Server data is stored in folder ``<working directory>/data``.

            :param server_dir: path of the folder where server data will be saved
                (and loaded from). If None, the working directory (where the script
                is executed) will be used.
            :param kwargs: (optional) values for some public configurable server attributes.
                Given values will overwrite values saved on disk.
        """

        # File paths and attributes related to database.
        server_dir = get_absolute_path(server_dir)
        if server_dir in self.__class__.__cache__:
            return
        if not os.path.exists(server_dir) or not os.path.isdir(server_dir):
            raise exceptions.ServerDirException(server_dir)
        self.data_path = os.path.join(server_dir, 'data')
        self.games_path = os.path.join(self.data_path, 'games')

        # Data in memory (not stored on disk).
        self.notifications = Queue()
        self.games_scheduler = Scheduler(1, self._process_game)
        self.backup_server = None
        self.backup_games = {}
        self.interruption_handler = InterruptionHandler(self)
        # Backend objects used to run server. If None, server is not yet started.
        # Initialized when you call Server.start() (see method below).
        self.backend = None  # type: _ServerBackend

        # Database (stored on disk).
        self.allow_registrations = True
        self.max_games = 0
        self.remove_canceled_games = False
        self.backup_delay_seconds = constants.DEFAULT_BACKUP_DELAY_SECONDS
        self.ping_seconds = constants.DEFAULT_PING_SECONDS
        self.users = None  # type: Users  # Users and administrators usernames.
        # {"map_name" => list("map_power")}
        self.available_maps = {}  # type: Dict[str, List[str]]
        self.maps_mtime = 0  # Latest maps modification date (used to manage maps cache in server object).

        # Server games loaded on memory (stored on disk).
        # Saved separately (each game in one JSON file).
        # Each game also stores tokens connected (player tokens, observer tokens, omniscient tokens).
        self.games = {}  # type: Dict[str, ServerGame]

        # Dictionary mapping game ID to list of power names.
        self.games_with_dummy_powers = {}  # type: Dict[str, List[str]]

        # Dictionary mapping a game ID present in games_with_dummy_powers, to
        # a couple of associated bot token and time when bot token was associated to this game ID.
        # If there is no bot token associated, couple is (None, None).
        self.dispatched_dummy_powers = {}  # type: Dict[str, tuple]

        # DAIDE TCP servers listening to a game's dedicated port.
        self.daide_servers = {}  # {port: daide_server}

        # Load data on memory.
        self._load()

        # If necessary, update server configurable attributes from kwargs.
        self.allow_registrations = bool(
            kwargs.pop(strings.ALLOW_REGISTRATIONS, self.allow_registrations))
        self.max_games = int(kwargs.pop(strings.MAX_GAMES, self.max_games))
        self.remove_canceled_games = bool(
            kwargs.pop(strings.REMOVE_CANCELED_GAMES,
                       self.remove_canceled_games))
        self.backup_delay_seconds = int(
            kwargs.pop(strings.BACKUP_DELAY_SECONDS,
                       self.backup_delay_seconds))
        self.ping_seconds = int(
            kwargs.pop(strings.PING_SECONDS, self.ping_seconds))
        assert not kwargs
        LOGGER.debug('Ping        : %s', self.ping_seconds)
        LOGGER.debug('Backup delay: %s', self.backup_delay_seconds)

        # Add server on servers cache.
        self.__class__.__cache__[server_dir] = self
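
The early return when server_dir is already in __class__.__cache__ only works together with a custom __new__ that hands back the cached instance instead of a fresh one. A sketch of the pattern this implies, assuming that is how the class is wired up:

class Server:
    __cache__ = {}  # {server_dir: Server instance}

    def __new__(cls, server_dir=None, **kwargs):
        server_dir = get_absolute_path(server_dir)
        if server_dir in cls.__cache__:
            # Same directory: reuse the instance. __init__ then hits the
            # cache check above and returns without reloading from disk.
            return cls.__cache__[server_dir]
        return super(Server, cls).__new__(cls)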
Code example #54
async def test_listeners(known_server, handlers, jsonrpc_init_msg):
    """ will some listeners listen?
    """
    handler, ws_handler = handlers
    manager = handler.manager

    manager.all_listeners = ["jupyter_lsp.tests.listener.dummy_listener"]

    manager.initialize()
    manager._listeners["client"] = []  # hide predefined client listeners

    assert len(manager._listeners["all"]) == 1

    dummy_listener = manager._listeners["all"][0]
    assert re.match(
        ("<MessageListener listener=<function dummy_listener at .*?>,"
         " method=None, language_server=None>"),
        repr(dummy_listener),
    )

    handler_listened = Queue()
    server_listened = Queue()
    all_listened = Queue()

    # some client listeners
    @lsp_message_listener("client",
                          language_server=known_server,
                          method="initialize")
    async def client_listener(scope, message, language_server, manager):
        await handler_listened.put(message)

    @lsp_message_listener("client", method=r"not-a-method")
    async def other_client_listener(scope, message, language_server,
                                    manager):  # pragma: no cover
        await handler_listened.put(message)
        raise NotImplementedError("shouldn't get here")

    # some server listeners
    @lsp_message_listener("server", language_server=None, method=None)
    async def server_listener(scope, message, language_server, manager):
        await server_listened.put(message)

    @lsp_message_listener("server", language_server=r"not-a-language-server")
    async def other_server_listener(scope, message, language_server,
                                    manager):  # pragma: no cover
        await handler_listened.put(message)
        raise NotImplementedError("shouldn't get here")

    # an all listener
    @lsp_message_listener("all")
    async def all_listener(scope, message, language_server,
                           manager):  # pragma: no cover
        await all_listened.put(message)

    assert len(manager._listeners["server"]) == 2
    assert len(manager._listeners["client"]) == 2
    assert len(manager._listeners["all"]) == 2

    ws_handler.open(known_server)

    await ws_handler.on_message(jsonrpc_init_msg)

    results = await asyncio.wait_for(
        asyncio.gather(
            handler_listened.get(),
            server_listened.get(),
            all_listened.get(),
            all_listened.get(),
            return_exceptions=True,
        ),
        20,
    )
    assert all([isinstance(res, dict) for res in results])

    ws_handler.on_close()

    handler_listened.task_done()
    server_listened.task_done()
    all_listened.task_done()
    all_listened.task_done()

    for listener in [
        client_listener,
        other_client_listener,
        server_listener,
        other_server_listener,
        all_listener,
    ]:
        manager.unregister_message_listener(listener)

    assert not manager._listeners["server"]
    assert not manager._listeners["client"]
    assert len(manager._listeners["all"]) == 1
Code example #55
class CommunicationSocketHandler(WebSocketHandler):
    waiters = set()
    i = 0

    def initialize(self):
        self.messages = Queue()

    def get_compression_options(self):
        # Non-None enables compression with default options.
        return {}

    def open(self):
        logging.info("websocket open")
        CommunicationSocketHandler.waiters.add(self)

    def on_close(self):
        logging.info("websocket close")
        CommunicationSocketHandler.waiters.remove(self)
        self._close()

    def on_message(self, message):
        logging.info("got message \n%r", message)
        debug = DecryptMessage(message)
        logging.info("try parse message %s", str(debug.get_all_b64()))
        logging.info("Output: %r", debug.decrypt())
        out = OutgoingPingMessage()
        outSend = out.generate()
        logging.info("send message plain: %s", outSend)
        enc = EncryptMessage(outSend.get('payload'))
        payload = str(enc.get_payload())
        time.sleep(5)
        self.send_raw(payload)

    def send_raw(self, data):
        if self.ws_connection is None:
            raise WebSocketClosedError()
        return self.ws_connection.write_message(data, binary=False)

    def on_pong(self, data):
        """Invoked when the response to a ping frame is received."""
        logging.info("pong received")

    def on_ping(self, data):
        """Invoked when the a ping frame is received."""
        logging.info("ping received")

    def parse_message(self, message):
        """"""
        return {}

    def send(self, message):
        logging.info("trying send message")
        try:
            self.write_message(dict(value=message))
        except WebSocketClosedError:
            logging.info("socket closed error")
            self._close()

    def _close(self):
        self.finished = True

    @gen.coroutine
    def submit(self, message):
        yield self.messages.put(message)

    @gen.coroutine
    def run(self):
        logging.info("Run")
        while not self.finished:
            message = yield self.messages.get()
            print("New message: " + str(message))
            self.send(message)
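
The handler above decouples producers from the websocket with a tornado Queue: submit() enqueues, while run() (presumably spawned once per connection) drains the queue and writes each message out. The same pattern in a minimal standalone form:

from tornado import gen, ioloop
from tornado.queues import Queue

messages = Queue()

@gen.coroutine
def consumer():
    while True:
        message = yield messages.get()
        try:
            print('sending:', message)  # stand-in for write_message()
        finally:
            messages.task_done()

@gen.coroutine
def main():
    ioloop.IOLoop.current().spawn_callback(consumer)
    yield messages.put('hello')  # producer side, like submit()
    yield messages.join()        # wait until the consumer handled it

ioloop.IOLoop.current().run_sync(main)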
Code example #56
File: pubnub_tornado.py  Project: QSDZvonimir/python
class SubscribeListener(SubscribeCallback):
    def __init__(self):
        self.connected = False
        self.connected_event = Event()
        self.disconnected_event = Event()
        self.presence_queue = Queue()
        self.message_queue = Queue()
        self.error_queue = Queue()

    def status(self, pubnub, status):
        if utils.is_subscribed_event(
                status) and not self.connected_event.is_set():
            self.connected_event.set()
        elif utils.is_unsubscribed_event(
                status) and not self.disconnected_event.is_set():
            self.disconnected_event.set()
        elif status.is_error():
            self.error_queue.put_nowait(status.error_data.exception)

    def message(self, pubnub, message):
        self.message_queue.put(message)

    def presence(self, pubnub, presence):
        self.presence_queue.put(presence)

    @tornado.gen.coroutine
    def _wait_for(self, coro):
        error = self.error_queue.get()
        wi = tornado.gen.WaitIterator(coro, error)

        while not wi.done():
            result = yield wi.next()

            if wi.current_future == coro:
                raise gen.Return(result)
            elif wi.current_future == error:
                raise result
            else:
                raise Exception("Unexpected future resolved: %s" %
                                str(wi.current_future))

    @tornado.gen.coroutine
    def wait_for_connect(self):
        if not self.connected_event.is_set():
            yield self._wait_for(self.connected_event.wait())
        else:
            raise Exception("instance is already connected")

    @tornado.gen.coroutine
    def wait_for_disconnect(self):
        if not self.disconnected_event.is_set():
            yield self._wait_for(self.disconnected_event.wait())
        else:
            raise Exception("instance is already disconnected")

    @tornado.gen.coroutine
    def wait_for_message_on(self, *channel_names):
        channel_names = list(channel_names)
        while True:
            try:  # NOQA
                env = yield self._wait_for(self.message_queue.get())
                if env.channel in channel_names:
                    raise tornado.gen.Return(env)
                else:
                    continue
            finally:
                self.message_queue.task_done()

    @tornado.gen.coroutine
    def wait_for_presence_on(self, *channel_names):
        channel_names = list(channel_names)
        while True:
            try:
                try:
                    env = yield self._wait_for(self.presence_queue.get())
                except:  # NOQA E722 pylint: disable=W0702
                    break
                if env.channel in channel_names:
                    raise tornado.gen.Return(env)
                else:
                    continue
            finally:
                self.presence_queue.task_done()
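
A usage sketch for the listener above, assuming an already-configured PubNubTornado instance and an arbitrary channel name (both placeholders):

import tornado.gen

@tornado.gen.coroutine
def consume_one(pubnub):
    listener = SubscribeListener()
    pubnub.add_listener(listener)
    pubnub.subscribe().channels('my_channel').execute()
    yield listener.wait_for_connect()
    envelope = yield listener.wait_for_message_on('my_channel')
    raise tornado.gen.Return(envelope.message)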
Code example #58
class Kernel(SingletonConfigurable):

    #---------------------------------------------------------------------------
    # Kernel interface
    #---------------------------------------------------------------------------

    # attribute to override with a GUI
    eventloop = Any(None)

    @observe('eventloop')
    def _update_eventloop(self, change):
        """schedule call to eventloop from IOLoop"""
        loop = ioloop.IOLoop.current()
        if change.new is not None:
            loop.add_callback(self.enter_eventloop)

    session = Instance(Session, allow_none=True)
    profile_dir = Instance('IPython.core.profiledir.ProfileDir', allow_none=True)
    shell_stream = Instance(ZMQStream, allow_none=True)

    @property
    def shell_streams(self):
        warnings.warn(
            'Property shell_streams is deprecated in favor of shell_stream',
            DeprecationWarning
        )
        return [self.shell_stream]

    control_stream = Instance(ZMQStream, allow_none=True)

    debug_shell_socket = Any()

    control_thread = Any()
    iopub_socket = Any()
    iopub_thread = Any()
    stdin_socket = Any()
    log = Instance(logging.Logger, allow_none=True)

    # identities:
    int_id = Integer(-1)
    ident = Unicode()

    @default('ident')
    def _default_ident(self):
        return str(uuid.uuid4())

    # This should be overridden by wrapper kernels that implement any real
    # language.
    language_info = {}

    # any links that should go in the help menu
    help_links = List()

    # Private interface

    _darwin_app_nap = Bool(True,
        help="""Whether to use appnope for compatibility with OS X App Nap.

        Only affects OS X >= 10.9.
        """
    ).tag(config=True)

    # track associations with current request
    _allow_stdin = Bool(False)
    _parent_header = Dict({'shell': {}, 'control': {}})
    _parent_ident = Dict({'shell': b'', 'control': b''})
    # Time to sleep after flushing the stdout/err buffers in each execute
    # cycle.  While this introduces a hard limit on the minimal latency of the
    # execute cycle, it helps prevent output synchronization problems for
    # clients.
    # Units are in seconds.  The minimum zmq latency on local host is probably
    # ~150 microseconds, set this to 500us for now.  We may need to increase it
    # a little if it's not enough after more interactive testing.
    _execute_sleep = Float(0.0005).tag(config=True)

    # Frequency of the kernel's event loop.
    # Units are in seconds, kernel subclasses for GUI toolkits may need to
    # adapt to milliseconds.
    _poll_interval = Float(0.01).tag(config=True)

    stop_on_error_timeout = Float(
        0.1,
        config=True,
        help="""time (in seconds) to wait for messages to arrive
        when aborting queued requests after an error.

        Requests that arrive within this window after an error
        will be cancelled.

        Increase in the event of unusually slow network
        causing significant delays,
        which can manifest as e.g. "Run all" in a notebook
        aborting some, but not all, messages after an error.
        """
    )

    # If the shutdown was requested over the network, we leave here the
    # necessary reply message so it can be sent by our registered atexit
    # handler.  This ensures that the reply is only sent to clients truly at
    # the end of our shutdown process (which happens after the underlying
    # IPython shell's own shutdown).
    _shutdown_message = None

    # This is a dict of port number that the kernel is listening on. It is set
    # by record_ports and used by connect_request.
    _recorded_ports = Dict()

    # set of aborted msg_ids
    aborted = Set()

    # Track execution count here. For IPython, we override this to use the
    # execution count we store in the shell.
    execution_count = 0

    msg_types = [
        'execute_request', 'complete_request',
        'inspect_request', 'history_request',
        'comm_info_request', 'kernel_info_request',
        'connect_request', 'shutdown_request',
        'is_complete_request',
        # deprecated:
        'apply_request',
    ]
    # add deprecated ipyparallel control messages
    control_msg_types = msg_types + ['clear_request', 'abort_request', 'debug_request']

    def __init__(self, **kwargs):
        super(Kernel, self).__init__(**kwargs)
        # Build dict of handlers for message types
        self.shell_handlers = {}
        for msg_type in self.msg_types:
            self.shell_handlers[msg_type] = getattr(self, msg_type)

        self.control_handlers = {}
        for msg_type in self.control_msg_types:
            self.control_handlers[msg_type] = getattr(self, msg_type)

        self.control_queue = Queue()
        if 'control_thread' in kwargs:
            kwargs['control_thread'].io_loop.add_callback(self.poll_control_queue)

    @gen.coroutine
    def dispatch_control(self, msg):
        self.control_queue.put_nowait(msg)

    @gen.coroutine
    def poll_control_queue(self):
        while True:
            msg = yield self.control_queue.get()
            yield self.process_control(msg)
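
    # NOTE: dispatch_control() only enqueues; when a control_thread is
    # supplied (see __init__ above), its io_loop runs poll_control_queue()
    # as the single consumer, so control messages are handled one at a
    # time, in arrival order, without blocking the shell channel.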

    @gen.coroutine
    def process_control(self, msg):
        """dispatch control requests"""
        idents, msg = self.session.feed_identities(msg, copy=False)
        try:
            msg = self.session.deserialize(msg, content=True, copy=False)
        except:
            self.log.error("Invalid Control Message", exc_info=True)
            return

        self.log.debug("Control received: %s", msg)

        # Set the parent message for side effects.
        self.set_parent(idents, msg, channel='control')
        self._publish_status('busy', 'control')

        header = msg['header']
        msg_type = header['msg_type']

        handler = self.control_handlers.get(msg_type, None)
        if handler is None:
            self.log.error("UNKNOWN CONTROL MESSAGE TYPE: %r", msg_type)
        else:
            try:
                yield gen.maybe_future(handler(self.control_stream, idents, msg))
            except Exception:
                self.log.error("Exception in control handler:", exc_info=True)

        sys.stdout.flush()
        sys.stderr.flush()
        self._publish_status('idle', 'control')
        # flush to ensure reply is sent
        self.control_stream.flush(zmq.POLLOUT)

    def should_handle(self, stream, msg, idents):
        """Check whether a shell-channel message should be handled

        Allows subclasses to prevent handling of certain messages (e.g. aborted requests).
        """
        msg_id = msg['header']['msg_id']
        if msg_id in self.aborted:
            msg_type = msg['header']['msg_type']
            # is it safe to assume a msg_id will not be resubmitted?
            self.aborted.remove(msg_id)
            self._send_abort_reply(stream, msg, idents)
            return False
        return True

    @gen.coroutine
    def dispatch_shell(self, msg):
        """dispatch shell requests"""
        idents, msg = self.session.feed_identities(msg, copy=False)
        try:
            msg = self.session.deserialize(msg, content=True, copy=False)
        except:
            self.log.error("Invalid Message", exc_info=True)
            return

        # Set the parent message for side effects.
        self.set_parent(idents, msg, channel='shell')
        self._publish_status('busy', 'shell')

        msg_type = msg['header']['msg_type']

        # Only abort execute requests
        if self._aborting and msg_type == 'execute_request':
            self._send_abort_reply(self.shell_stream, msg, idents)
            self._publish_status('idle', 'shell')
            # flush to ensure reply is sent before
            # handling the next request
            self.shell_stream.flush(zmq.POLLOUT)
            return

        # Print some info about this message and leave a '--->' marker, so it's
        # easier to trace visually the message chain when debugging.  Each
        # handler prints its message at the end.
        self.log.debug('\n*** MESSAGE TYPE:%s***', msg_type)
        self.log.debug('   Content: %s\n   --->\n   ', msg['content'])

        if not self.should_handle(self.shell_stream, msg, idents):
            return

        handler = self.shell_handlers.get(msg_type, None)
        if handler is None:
            self.log.warning("Unknown message type: %r", msg_type)
        else:
            self.log.debug("%s: %s", msg_type, msg)
            try:
                self.pre_handler_hook()
            except Exception:
                self.log.debug("Unable to signal in pre_handler_hook:", exc_info=True)
            try:
                yield gen.maybe_future(handler(self.shell_stream, idents, msg))
            except Exception:
                self.log.error("Exception in message handler:", exc_info=True)
            finally:
                try:
                    self.post_handler_hook()
                except Exception:
                    self.log.debug("Unable to signal in post_handler_hook:", exc_info=True)

        sys.stdout.flush()
        sys.stderr.flush()
        self._publish_status('idle', 'shell')
        # flush to ensure reply is sent before
        # handling the next request
        self.shell_stream.flush(zmq.POLLOUT)

    def pre_handler_hook(self):
        """Hook to execute before calling message handler"""
        # ensure default_int_handler during handler call
        self.saved_sigint_handler = signal(SIGINT, default_int_handler)

    def post_handler_hook(self):
        """Hook to execute after calling message handler"""
        signal(SIGINT, self.saved_sigint_handler)

    def enter_eventloop(self):
        """enter eventloop"""
        self.log.info("Entering eventloop %s", self.eventloop)
        # record handle, so we can check when this changes
        eventloop = self.eventloop
        if eventloop is None:
            self.log.info("Exiting as there is no eventloop")
            return

        def advance_eventloop():
            # check if eventloop changed:
            if self.eventloop is not eventloop:
                self.log.info("exiting eventloop %s", eventloop)
                return
            if self.msg_queue.qsize():
                self.log.debug("Delaying eventloop due to waiting messages")
                # still messages to process, make the eventloop wait
                schedule_next()
                return
            self.log.debug("Advancing eventloop %s", eventloop)
            try:
                eventloop(self)
            except KeyboardInterrupt:
                # Ctrl-C shouldn't crash the kernel
                self.log.error("KeyboardInterrupt caught in kernel")
            if self.eventloop is eventloop:
                # schedule advance again
                schedule_next()

        def schedule_next():
            """Schedule the next advance of the eventloop"""
            # flush the eventloop every so often,
            # giving us a chance to handle messages in the meantime
            self.log.debug("Scheduling eventloop advance")
            self.io_loop.call_later(1, advance_eventloop)

        # begin polling the eventloop
        schedule_next()

    @gen.coroutine
    def do_one_iteration(self):
        """Process a single shell message

        Any pending control messages will be flushed as well

        .. versionchanged:: 5
            This is now a coroutine
        """
        # flush messages off of shell stream into the message queue
        self.shell_stream.flush()
        # process at most one shell message per iteration
        yield self.process_one(wait=False)

    @gen.coroutine
    def process_one(self, wait=True):
        """Process one request

        Returns None if no message was handled.
        """
        if wait:
            t, dispatch, args = yield self.msg_queue.get()
        else:
            try:
                t, dispatch, args = self.msg_queue.get_nowait()
            except QueueEmpty:
                return None
        yield gen.maybe_future(dispatch(*args))

    @gen.coroutine
    def dispatch_queue(self):
        """Coroutine to preserve order of message handling

        Ensures that only one message is processing at a time,
        even when the handler is async
        """

        while True:
            # ensure control stream is flushed before processing shell messages
            if self.control_stream:
                self.control_stream.flush()
            # receive the next message and handle it
            try:
                yield self.process_one()
            except Exception:
                self.log.exception("Error in message handler")

    _message_counter = Any(
        help="""Monotonic counter of messages
        """,
    )
    @default('_message_counter')
    def _message_counter_default(self):
        return itertools.count()

    def schedule_dispatch(self, dispatch, *args):
        """schedule a message for dispatch"""
        idx = next(self._message_counter)

        self.msg_queue.put_nowait(
            (
                idx,
                dispatch,
                args,
            )
        )
        # ensure the eventloop wakes up
        self.io_loop.add_callback(lambda: None)

    def start(self):
        """register dispatchers for streams"""
        self.io_loop = ioloop.IOLoop.current()
        self.msg_queue = Queue()
        self.io_loop.add_callback(self.dispatch_queue)

        self.control_stream.on_recv(self.dispatch_control, copy=False)

        self.shell_stream.on_recv(
            partial(
                self.schedule_dispatch,
                self.dispatch_shell,
            ),
            copy=False,
        )

        # publish starting status
        self._publish_status('starting', 'shell')


    def record_ports(self, ports):
        """Record the ports that this kernel is using.

        The creator of the Kernel instance must call this methods if they
        want the :meth:`connect_request` method to return the port numbers.
        """
        self._recorded_ports = ports

    #---------------------------------------------------------------------------
    # Kernel request handlers
    #---------------------------------------------------------------------------

    def _publish_execute_input(self, code, parent, execution_count):
        """Publish the code request on the iopub stream."""

        self.session.send(self.iopub_socket, 'execute_input',
                          {'code':code, 'execution_count': execution_count},
                          parent=parent, ident=self._topic('execute_input')
        )

    def _publish_status(self, status, channel, parent=None):
        """send status (busy/idle) on IOPub"""
        self.session.send(self.iopub_socket,
                          'status',
                          {'execution_state': status},
                          parent=parent or self._parent_header[channel],
                          ident=self._topic('status'),
                          )
    def _publish_debug_event(self, event):
        self.session.send(self.iopub_socket,
                          'debug_event',
                          event,
                          parent=self._parent_header['control'],
                          ident=self._topic('debug_event')
        )

    def set_parent(self, ident, parent, channel='shell'):
        """Set the current parent_header

        Side effects (IOPub messages) and replies are associated with
        the request that caused them via the parent_header.

        The parent identity is used to route input_request messages
        on the stdin channel.
        """
        self._parent_ident[channel] = ident
        self._parent_header[channel] = parent

    def send_response(self, stream, msg_or_type, content=None, ident=None,
             buffers=None, track=False, header=None, metadata=None, channel='shell'):
        """Send a response to the message we're currently processing.

        This accepts all the parameters of :meth:`jupyter_client.session.Session.send`
        except ``parent``.

        This relies on :meth:`set_parent` having been called for the current
        message.
        """
        return self.session.send(stream, msg_or_type, content, self._parent_header[channel],
                                 ident, buffers, track, header, metadata)

    def init_metadata(self, parent):
        """Initialize metadata.

        Run at the beginning of execution requests.
        """
        # FIXME: `started` is part of ipyparallel
        # Remove for ipykernel 5.0
        return {
            'started': now(),
        }

    def finish_metadata(self, parent, metadata, reply_content):
        """Finish populating metadata.

        Run after completing an execution request.
        """
        return metadata

    @gen.coroutine
    def execute_request(self, stream, ident, parent):
        """handle an execute_request"""

        try:
            content = parent['content']
            code = content['code']
            silent = content['silent']
            store_history = content.get('store_history', not silent)
            user_expressions = content.get('user_expressions', {})
            allow_stdin = content.get('allow_stdin', False)
        except:
            self.log.error("Got bad msg: ")
            self.log.error("%s", parent)
            return

        stop_on_error = content.get('stop_on_error', True)

        metadata = self.init_metadata(parent)

        # Re-broadcast our input for the benefit of listening clients, and
        # start computing output
        if not silent:
            self.execution_count += 1
            self._publish_execute_input(code, parent, self.execution_count)

        reply_content = yield gen.maybe_future(
            self.do_execute(
                code, silent, store_history,
                user_expressions, allow_stdin,
            )
        )

        # Flush output before sending the reply.
        sys.stdout.flush()
        sys.stderr.flush()
        # FIXME: on rare occasions, the flush doesn't seem to make it to the
        # clients... This seems to mitigate the problem, but we definitely need
        # to better understand what's going on.
        if self._execute_sleep:
            time.sleep(self._execute_sleep)

        # Send the reply.
        reply_content = json_clean(reply_content)
        metadata = self.finish_metadata(parent, metadata, reply_content)

        reply_msg = self.session.send(stream, 'execute_reply',
                                      reply_content, parent, metadata=metadata,
                                      ident=ident)

        self.log.debug("%s", reply_msg)

        if not silent and reply_msg['content']['status'] == 'error' and stop_on_error:
            yield self._abort_queues()

    def do_execute(self, code, silent, store_history=True,
                   user_expressions=None, allow_stdin=False):
        """Execute user code. Must be overridden by subclasses.
        """
        raise NotImplementedError

    @gen.coroutine
    def complete_request(self, stream, ident, parent):
        content = parent['content']
        code = content['code']
        cursor_pos = content['cursor_pos']

        matches = yield gen.maybe_future(self.do_complete(code, cursor_pos))
        matches = json_clean(matches)
        completion_msg = self.session.send(stream, 'complete_reply',
                                           matches, parent, ident)

    def do_complete(self, code, cursor_pos):
        """Override in subclasses to find completions.
        """
        return {'matches' : [],
                'cursor_end' : cursor_pos,
                'cursor_start' : cursor_pos,
                'metadata' : {},
                'status' : 'ok'}

    @gen.coroutine
    def inspect_request(self, stream, ident, parent):
        content = parent['content']

        reply_content = yield gen.maybe_future(
            self.do_inspect(
                content['code'], content['cursor_pos'],
                content.get('detail_level', 0),
            )
        )
        # Before we send this object over, we scrub it for JSON usage
        reply_content = json_clean(reply_content)
        msg = self.session.send(stream, 'inspect_reply',
                                reply_content, parent, ident)
        self.log.debug("%s", msg)

    def do_inspect(self, code, cursor_pos, detail_level=0):
        """Override in subclasses to allow introspection.
        """
        return {'status': 'ok', 'data': {}, 'metadata': {}, 'found': False}

    @gen.coroutine
    def history_request(self, stream, ident, parent):
        content = parent['content']

        reply_content = yield gen.maybe_future(self.do_history(**content))

        reply_content = json_clean(reply_content)
        msg = self.session.send(stream, 'history_reply',
                                reply_content, parent, ident)
        self.log.debug("%s", msg)

    def do_history(self, hist_access_type, output, raw, session=None, start=None,
                   stop=None, n=None, pattern=None, unique=False):
        """Override in subclasses to access history.
        """
        return {'status': 'ok', 'history': []}

    def connect_request(self, stream, ident, parent):
        if self._recorded_ports is not None:
            content = self._recorded_ports.copy()
        else:
            content = {}
        content['status'] = 'ok'
        msg = self.session.send(stream, 'connect_reply',
                                content, parent, ident)
        self.log.debug("%s", msg)

    @property
    def kernel_info(self):
        return {
            'protocol_version': kernel_protocol_version,
            'implementation': self.implementation,
            'implementation_version': self.implementation_version,
            'language_info': self.language_info,
            'banner': self.banner,
            'help_links': self.help_links,
        }

    def kernel_info_request(self, stream, ident, parent):
        content = {'status': 'ok'}
        content.update(self.kernel_info)
        msg = self.session.send(stream, 'kernel_info_reply',
                                content, parent, ident)
        self.log.debug("%s", msg)

    def comm_info_request(self, stream, ident, parent):
        content = parent['content']
        target_name = content.get('target_name', None)

        # Should this be moved to ipykernel?
        if hasattr(self, 'comm_manager'):
            comms = {
                k: dict(target_name=v.target_name)
                for (k, v) in self.comm_manager.comms.items()
                if v.target_name == target_name or target_name is None
            }
        else:
            comms = {}
        reply_content = dict(comms=comms, status='ok')
        msg = self.session.send(stream, 'comm_info_reply',
                                reply_content, parent, ident)
        self.log.debug("%s", msg)

    @gen.coroutine
    def shutdown_request(self, stream, ident, parent):
        content = yield gen.maybe_future(self.do_shutdown(parent['content']['restart']))
        self.session.send(stream, 'shutdown_reply', content, parent, ident=ident)
        # same content, but different msg_id for broadcasting on IOPub
        self._shutdown_message = self.session.msg('shutdown_reply',
                                                  content, parent
        )

        self._at_shutdown()

        self.log.debug('Stopping control ioloop')
        control_io_loop = self.control_stream.io_loop
        control_io_loop.add_callback(control_io_loop.stop)

        self.log.debug('Stopping shell ioloop')
        shell_io_loop = self.shell_stream.io_loop
        shell_io_loop.add_callback(shell_io_loop.stop)

    def do_shutdown(self, restart):
        """Override in subclasses to do things when the frontend shuts down the
        kernel.
        """
        return {'status': 'ok', 'restart': restart}

    @gen.coroutine
    def is_complete_request(self, stream, ident, parent):
        content = parent['content']
        code = content['code']

        reply_content = yield gen.maybe_future(self.do_is_complete(code))
        reply_content = json_clean(reply_content)
        reply_msg = self.session.send(stream, 'is_complete_reply',
                                      reply_content, parent, ident)
        self.log.debug("%s", reply_msg)

    def do_is_complete(self, code):
        """Override in subclasses to find completions.
        """
        return {'status': 'unknown'}

    @gen.coroutine
    def debug_request(self, stream, ident, parent):
        content = parent['content']

        reply_content = yield gen.maybe_future(self.do_debug_request(content))
        reply_content = json_clean(reply_content)
        reply_msg = self.session.send(stream, 'debug_reply', reply_content,
                                      parent, ident)
        self.log.debug("%s", reply_msg)

    @gen.coroutine
    def do_debug_request(self, msg):
        raise NotImplementedError

    #---------------------------------------------------------------------------
    # Engine methods (DEPRECATED)
    #---------------------------------------------------------------------------

    def apply_request(self, stream, ident, parent):
        self.log.warning("apply_request is deprecated in kernel_base, moving to ipyparallel.")
        try:
            content = parent['content']
            bufs = parent['buffers']
            msg_id = parent['header']['msg_id']
        except Exception:
            self.log.error("Got bad msg: %s", parent, exc_info=True)
            return

        md = self.init_metadata(parent)

        reply_content, result_buf = self.do_apply(content, bufs, msg_id, md)

        # flush i/o
        sys.stdout.flush()
        sys.stderr.flush()

        md = self.finish_metadata(parent, md, reply_content)

        self.session.send(stream, 'apply_reply', reply_content,
                          parent=parent, ident=ident, buffers=result_buf,
                          metadata=md)

    def do_apply(self, content, bufs, msg_id, reply_metadata):
        """DEPRECATED"""
        raise NotImplementedError

    #---------------------------------------------------------------------------
    # Control messages (DEPRECATED)
    #---------------------------------------------------------------------------

    def abort_request(self, stream, ident, parent):
        """abort a specific msg by id"""
        self.log.warning("abort_request is deprecated in kernel_base. It is only part of IPython parallel")
        msg_ids = parent['content'].get('msg_ids', None)
        if isinstance(msg_ids, str):
            msg_ids = [msg_ids]
        if not msg_ids:
            self._abort_queues()
        for mid in msg_ids:
            self.aborted.add(str(mid))

        content = dict(status='ok')
        reply_msg = self.session.send(stream, 'abort_reply', content=content,
                                      parent=parent, ident=ident)
        self.log.debug("%s", reply_msg)

    def clear_request(self, stream, idents, parent):
        """Clear our namespace."""
        self.log.warning("clear_request is deprecated in kernel_base. It is only part of IPython parallel")
        content = self.do_clear()
        self.session.send(stream, 'clear_reply', ident=idents, parent=parent,
                          content=content)

    def do_clear(self):
        """DEPRECATED since 4.0.3"""
        raise NotImplementedError

    #---------------------------------------------------------------------------
    # Protected interface
    #---------------------------------------------------------------------------

    def _topic(self, topic):
        """prefixed topic for IOPub messages"""
        base = "kernel.%s" % self.ident

        return py3compat.cast_bytes("%s.%s" % (base, topic))

    _aborting = Bool(False)

    @gen.coroutine
    def _abort_queues(self):
        self.shell_stream.flush()
        self._aborting = True
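        # While the flag is set, incoming shell requests are answered with
        # 'aborted' replies instead of being executed.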

        def stop_aborting(f):
            self.log.info("Finishing abort")
            self._aborting = False

        self.io_loop.add_future(gen.sleep(self.stop_on_error_timeout), stop_aborting)

    def _send_abort_reply(self, stream, msg, idents):
        """Send a reply to an aborted request"""
        self.log.info("Aborting:")
        self.log.info("%s", msg)
        reply_type = msg['header']['msg_type'].rsplit('_', 1)[0] + '_reply'
        status = {'status': 'aborted'}
        md = {'engine': self.ident}
        md.update(status)
        self.session.send(
            stream, reply_type, metadata=md,
            content=status, parent=msg, ident=idents,
        )

    def _no_raw_input(self):
        """Raise StdinNotImplementedError if active frontend doesn't support
        stdin."""
        raise StdinNotImplementedError("raw_input was called, but this "
                                       "frontend does not support stdin.")

    def getpass(self, prompt='', stream=None):
        """Forward getpass to frontends

        Raises
        ------
        StdinNotImplementedError if active frontend doesn't support stdin.
        """
        if not self._allow_stdin:
            raise StdinNotImplementedError(
                "getpass was called, but this frontend does not support input requests."
            )
        if stream is not None:
            import warnings
            warnings.warn("The `stream` parameter of `getpass.getpass` will have no effect when using ipykernel",
                    UserWarning, stacklevel=2)
        return self._input_request(prompt,
            self._parent_ident['shell'],
            self._parent_header['shell'],
            password=True,
        )

    def raw_input(self, prompt=''):
        """Forward raw_input to frontends

        Raises
        ------
        StdinNotImplementedError if active frontend doesn't support stdin.
        """
        if not self._allow_stdin:
            raise StdinNotImplementedError(
                "raw_input was called, but this frontend does not support input requests."
            )
        return self._input_request(str(prompt),
            self._parent_ident['shell'],
            self._parent_header['shell'],
            password=False,
        )

    def _input_request(self, prompt, ident, parent, password=False):
        # Flush output before making the request.
        sys.stderr.flush()
        sys.stdout.flush()

        # flush the stdin socket, to purge stale replies
        while True:
            try:
                self.stdin_socket.recv_multipart(zmq.NOBLOCK)
            except zmq.ZMQError as e:
                if e.errno == zmq.EAGAIN:
                    break
                else:
                    raise

        # Send the input request.
        content = json_clean(dict(prompt=prompt, password=password))
        self.session.send(self.stdin_socket, 'input_request', content, parent,
                          ident=ident)

        # Await a response.
        while True:
            try:
                # Use polling with select() so KeyboardInterrupts can get
                # through; doing a blocking recv() means stdin reads are
                # uninterruptible on Windows. We need a timeout because
                # zmq.select() is also uninterruptible, but at least this
                # way reads get noticed immediately and KeyboardInterrupts
                # get noticed fairly quickly by human response time standards.
                rlist, _, xlist = zmq.select(
                    [self.stdin_socket], [], [self.stdin_socket], 0.01
                )
                if rlist or xlist:
                    ident, reply = self.session.recv(self.stdin_socket)
                    if (ident, reply) != (None, None):
                        break
            except KeyboardInterrupt:
                # re-raise KeyboardInterrupt, to truncate traceback
                raise KeyboardInterrupt("Interrupted by user") from None
            except Exception:
                self.log.warning("Invalid message:", exc_info=True)

        try:
            value = py3compat.unicode_to_str(reply['content']['value'])
        except Exception:
            self.log.error("Bad input_reply: %s", parent)
            value = ''
        if value == '\x04':
            # EOF
            raise EOFError
        return value

    def _at_shutdown(self):
        """Actions taken at shutdown by the kernel, called by python's atexit.
        """
        if self._shutdown_message is not None:
            self.session.send(self.iopub_socket, self._shutdown_message, ident=self._topic('shutdown'))
            self.log.debug("%s", self._shutdown_message)
        self.control_stream.flush(zmq.POLLOUT)
コード例 #59
Code example #59
 def __init__(self, raw_headers, header_table):
     self._queue = Queue()  # buffers incoming items for async consumption
     self._header_table = header_table
     self._current_headers = self._header_table.merge(raw_headers)
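
Only the constructor survives in this snippet, so the draining side is not shown. A plausible consumer sketch, assuming _queue is a tornado.queues.Queue and using a hypothetical process() handler:

from tornado import gen

@gen.coroutine
def consume(self):
    while True:
        item = yield self._queue.get()  # suspends until a producer puts an item
        try:
            self.process(item)          # hypothetical per-item handler
        finally:
            self._queue.task_done()     # unblocks any queue.join() waiters
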
Code example #60
File: try.py Project: JinkelaCrops/tornado-learning
from utils.datatool import RemoteIO

remoteio = RemoteIO("localhost")
remoteio.test("3")

from tornado.queues import Queue

q = Queue(maxsize=3)


def putq(q):
    # Queue.put returns a Future; called outside a coroutine the Future is
    # discarded, so put_nowait would be the idiomatic synchronous call here.
    q.put(3)


putq(q)
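
As written, putq discards the Future returned by q.put and nothing ever consumes the queue. A fuller sketch of the producer/consumer flow with tornado's Queue, loosely following the pattern in the tornado documentation:

from tornado import gen, ioloop
from tornado.queues import Queue

q = Queue(maxsize=3)

@gen.coroutine
def producer():
    for i in range(5):
        yield q.put(i)        # suspends once three items are pending
        print('put %d' % i)

@gen.coroutine
def consumer():
    while True:
        item = yield q.get()
        print('got %d' % item)
        q.task_done()

@gen.coroutine
def main():
    ioloop.IOLoop.current().spawn_callback(consumer)
    yield producer()
    yield q.join()            # resolves once every item is task_done()

ioloop.IOLoop.current().run_sync(main)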