Example #1
    def __init__(self, pubnub_instance):

        subscription_manager = self

        self._message_queue = Queue()
        self._consumer_event = Event()
        self._cancellation_event = Event()
        self._subscription_lock = Semaphore(1)
        # self._current_request_key_object = None
        self._heartbeat_periodic_callback = None
        self._reconnection_manager = TornadoReconnectionManager(pubnub_instance)

        super(TornadoSubscriptionManager, self).__init__(pubnub_instance)
        self._start_worker()

        class TornadoReconnectionCallback(ReconnectionCallback):
            def on_reconnect(self):
                subscription_manager.reconnect()

                pn_status = PNStatus()
                pn_status.category = PNStatusCategory.PNReconnectedCategory
                pn_status.error = False

                subscription_manager._subscription_status_announced = True
                subscription_manager._listener_manager.announce_status(pn_status)

        self._reconnection_listener = TornadoReconnectionCallback()
        self._reconnection_manager.set_reconnection_listener(self._reconnection_listener)
Example #2
        def __init__(self, max_concurrent_batches=10, block_on_send=False,
                    block_on_response=False, max_batch_size=100, send_frequency=0.25,
                    user_agent_addition=''):
            if not has_tornado:
                raise ImportError('TornadoTransmission requires tornado, but it was not found.')

            self.block_on_send = block_on_send
            self.block_on_response = block_on_response
            self.max_batch_size = max_batch_size
            self.send_frequency = send_frequency

            user_agent = "libhoney-py/" + VERSION
            if user_agent_addition:
                user_agent += " " + user_agent_addition

            self.http_client = AsyncHTTPClient(
                force_instance=True,
                defaults=dict(user_agent=user_agent))

            # libhoney adds events to the pending queue for us to send
            self.pending = Queue(maxsize=1000)
            # we hand back responses from the API on the responses queue
            self.responses = Queue(maxsize=2000)

            self.batch_data = {}
            self.sd = statsd.StatsClient(prefix="libhoney")
            self.batch_sem = Semaphore(max_concurrent_batches)
Example #3
    def __init__(self, device_server, stream, address):
        self.fw_version = 0.0
        self.recv_msg_cond = Condition()
        self.recv_msg = {}
        self.send_msg_sem = Semaphore(1)
        self.pending_request_cnt = 0
        self.device_server = device_server
        self.stream = stream
        self.address = address
        self.stream.set_nodelay(True)
        self.timeout_handler_onlinecheck = None
        self.timeout_handler_offline = None
        self.killed = False
        self.sn = ""
        self.private_key = ""
        self.node_id = 0
        self.iv = None
        self.cipher_down = None
        self.cipher_up = None

        #self.state_waiters = []
        #self.state_happened = []

        self.event_waiters = []
        self.event_happened = []

        self.ota_ing = False
        self.ota_notify_done_future = None
        self.post_ota = False
        self.online_status = True
Example #4
def setup_handler(
    pairs_path,
    nonpairs_path,
    lang_names,
    missing_freqs_path,
    timeout,
    max_pipes_per_pair,
    min_pipes_per_pair,
    max_users_per_pipe,
    max_idle_secs,
    restart_pipe_after,
    max_doc_pipes,
    verbosity=0,
    scale_mt_logs=False,
    memory=1000,
    apy_keys=None,
):

    global missing_freqs_db
    if missing_freqs_path:
        missing_freqs_db = missingdb.MissingDb(missing_freqs_path, memory)

    handler = BaseHandler
    handler.lang_names = lang_names
    handler.timeout = timeout
    handler.max_pipes_per_pair = max_pipes_per_pair
    handler.min_pipes_per_pair = min_pipes_per_pair
    handler.max_users_per_pipe = max_users_per_pipe
    handler.max_idle_secs = max_idle_secs
    handler.restart_pipe_after = restart_pipe_after
    handler.scale_mt_logs = scale_mt_logs
    handler.verbosity = verbosity
    handler.doc_pipe_sem = Semaphore(max_doc_pipes)
    handler.api_keys_conf = apy_keys

    modes = search_path(pairs_path, verbosity=verbosity)
    if nonpairs_path:
        src_modes = search_path(nonpairs_path,
                                include_pairs=False,
                                verbosity=verbosity)
        for mtype in modes:
            modes[mtype] += src_modes[mtype]

    for mtype in modes:
        logging.info('%d %s modes found', len(modes[mtype]), mtype)

    for path, lang_src, lang_trg in modes['pair']:
        handler.pairs['%s-%s' % (lang_src, lang_trg)] = path
    for dirpath, modename, lang_pair in modes['analyzer']:
        handler.analyzers[lang_pair] = (dirpath, modename)
    for dirpath, modename, lang_pair in modes['generator']:
        handler.generators[lang_pair] = (dirpath, modename)
    for dirpath, modename, lang_pair in modes['tagger']:
        handler.taggers[lang_pair] = (dirpath, modename)
    for dirpath, modename, lang_src in modes['spell']:
        if (any(lang_src == elem[2] for elem in modes['tokenise'])):
            handler.spellers[lang_src] = (dirpath, modename)

    handler.init_pairs_graph()
    handler.init_paths()
Example #5
File: ipc.py Project: bryson/salt
    def __singleton_init__(self, socket_path, io_loop=None):
        super(IPCMessageSubscriber, self).__singleton_init__(
            socket_path, io_loop=io_loop)
        self._read_sync_future = None
        self._read_stream_future = None
        self._sync_ioloop_running = False
        self.saved_data = []
        self._sync_read_in_progress = Semaphore()
Example #6
    def __init__(self, db_uri, project_id, admin_uid, client_id, client_secret,
                 api_key, api_rq_interval, domain, secure, static_uri,
                 static_path, thread_count, crawler_config, **kwargs):
        self._log = extdlog.getLogger(self.__class__.__name__)
        # Database connection
        self._db = Database(db_uri, log=self._log.getChild('db'))
        # Session management connection
        self._pool = WorkerPool(thread_count)
        self._hasher = ImageHasher(self._log.getChild('hasher'), self._pool)
        AsyncHTTPClient.configure(
            None,
            defaults=dict(
                user_agent=
                "HADSH/0.0.1 (https://hackaday.io/project/29161-hackadayio-spambot-hunter-project)"
            ))
        self._api = HackadayAPI(client_id=client_id,
                                client_secret=client_secret,
                                api_key=api_key,
                                rqlim_time=api_rq_interval,
                                client=AsyncHTTPClient(),
                                log=self._log.getChild('api'))
        self._crawler = Crawler(project_id,
                                admin_uid,
                                self._db,
                                self._api,
                                self._hasher,
                                self._log.getChild('crawler'),
                                config=crawler_config)
        self._resizer = ImageResizer(self._log.getChild('resizer'), self._pool)
        self._domain = domain
        self._secure = secure
        self._classify_sem = Semaphore(1)

        self._crypt_context = CryptContext(['argon2', 'scrypt', 'bcrypt'])

        # Initialise traits
        init_traits(self, self._log.getChild('trait'))

        super(HADSHApp, self).__init__([
            (r"/", RootHandler),
            (r"/login", LoginHandler),
            (r"/avatar/([0-9]+)", AvatarHandler),
            (r"/avatar/(average_hash|dhash|phash|whash|sha512)/([0-9]+)", \
                    AvatarHashHandler),
            (r"/user/([0-9]+)", UserHandler),
            (r"/callback", CallbackHandler),
            (r"/classify/([0-9]+)", ClassifyHandler),
            (r"/data/newcomers.json", NewcomerDataHandler),
            (r"/data/legit.json", LegitUserDataHandler),
            (r"/data/suspect.json", SuspectUserDataHandler),
            (r"/data/admin.json", AdminUserDataHandler),
            (r"/authorize", RedirectHandler, {
                "url": self._api.auth_uri
            }),
        ],
        static_url_prefix=static_uri,
        static_path=static_path,
        **kwargs)
Example #7
    def __init__(self, trunk_id, name, max_lines, tag, direction):
        self.id        = trunk_id
        self.name      = name
        self.max_lines = max_lines
        self.tag       = tag
        self.direction = direction
        self.lines     = {l: {} for l in range(0, max_lines)}
        self.semaphore = Semaphore(1)
        self.callers   = {}
        self.counters  = {}  # assumption: the original value was cut off; an empty dict mirrors self.callers
Example #8
    def __init__(self, start_link=None):
        self._init_defaults()
        # Now load the config file to override defaults
        self._load_config()

        if start_link:
            self.start_link = start_link
        if not self.start_link:
            raise SystemExit("No start link is provided, exiting now...")
        links.put(self.start_link)
        self.semaphore = Semaphore(self.workers_limit)
Example #9
    def __init__(self, prespawn_count, kernel_manager):
        # Make sure there's at least one kernel as a delegate
        if not prespawn_count:
            prespawn_count = 1

        super(ManagedKernelPool, self).__init__(prespawn_count, kernel_manager)

        self.kernel_clients = {}
        self.on_recv_funcs = {}
        self.kernel_pool = []

        kernel_ids = self.kernel_manager.list_kernel_ids()
        self.kernel_semaphore = Semaphore(len(kernel_ids))

        # Create clients and iopub handlers for prespawned kernels
        for kernel_id in kernel_ids:
            self.kernel_clients[kernel_id] = kernel_manager.get_kernel(
                kernel_id).client()
            self.kernel_pool.append(kernel_id)
            iopub = self.kernel_manager.connect_iopub(kernel_id)
            iopub.on_recv(self.create_on_reply(kernel_id))
Example #10
class AsyncModbusGeneratorClient(AsyncModbusSerialClient):
    def __init__(self, method='ascii', **kwargs):
        super(AsyncModbusGeneratorClient, self).__init__(method=method,
                                                         **kwargs)
        self.sem = Semaphore(1)

    @gen.coroutine
    def read_input_registers(self, address, count=1, **kwargs):
        fut_result = Future()
        request = ReadInputRegistersRequest(address, count, **kwargs)
        yield self.sem.acquire()
        try:
            res = self.execute(request)
            res.addCallback(fut_result.set_result)
            yield fut_result
        finally:
            self.sem.release()
        raise gen.Return(fut_result.result())

    @gen.coroutine
    def write_coil(self, address, value, **kwargs):
        fut_result = Future()
        request = WriteSingleCoilRequest(address, value, **kwargs)
        yield self.sem.acquire()
        try:
            res = self.execute(request)
            res.addCallback(fut_result.set_result)
            yield fut_result
        finally:
            self.sem.release()
        raise gen.Return(fut_result.result())

    @gen.coroutine
    def write_register(self, address, value, **kwargs):
        fut_result = Future()
        request = WriteSingleRegisterRequest(address, value, **kwargs)
        yield self.sem.acquire()
        try:
            res = self.execute(request)
            res.addCallback(fut_result.set_result)
            yield fut_result
        finally:
            self.sem.release()
        raise gen.Return(fut_result.result())
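The three read/write coroutines above repeat the same acquire/execute/release dance. A minimal sketch of how that duplication could be factored into one shared helper method; the _guarded_execute name is illustrative and not part of pymodbus:

    @gen.coroutine
    def _guarded_execute(self, request):
        # Hypothetical helper: serialise access through the one-permit semaphore
        # and adapt the callback-style result to a Tornado Future, exactly as the
        # three methods above do.
        fut_result = Future()
        yield self.sem.acquire()
        try:
            res = self.execute(request)
            res.addCallback(fut_result.set_result)
            yield fut_result
        finally:
            self.sem.release()
        raise gen.Return(fut_result.result())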
Example #11
class Worker(object):
    def __init__(self, max_queue=0, io_loop=None):
        self.io_loop = io_loop or IOLoop.current()
        self.active = {}
        self.sem = Semaphore(value=max_queue)
        self.log = logging.getLogger("pizzadelivery.downloader")

    def _add_to_active(self, task, future):
        self.active[self.task_to_id(task)] = future

    def _in_active(self, task):
        return self.task_to_id(task) in self.active

    def _remove_from_active(self, task):
        del self.active[self.task_to_id(task)]

    def _get_future_for_task(self, task):
        return self.active[self.task_to_id(task)]

    def enqueue(self, task):
        if self._in_active(task):
            future = concurrent.Future()
            concurrent.chain_future(self._get_future_for_task(task), future)
            return future

        future = concurrent.Future()
        self._add_to_active(task, future)
        concurrent.chain_future(self._do(task), future)
        return future

    @gen.coroutine
    def _do(self, task):
        assert self._in_active(task)
        try:
            with (yield self.sem.acquire()):
                res = yield gen.maybe_future(self.do(task))
                raise gen.Return(res)
        finally:
            self._remove_from_active(task)

    @gen.coroutine
    def do(self, task):
        raise NotImplementedError  # pragma: no cover

    def task_to_id(self, task):
        return task
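A minimal usage sketch under stated assumptions (UrlDownloader, URL-string tasks, and AsyncHTTPClient as the task runner are all illustrative, not part of the code above): subclass Worker, override do(), and let enqueue() deduplicate concurrent requests for the same task.

from tornado import gen
from tornado.httpclient import AsyncHTTPClient

class UrlDownloader(Worker):
    @gen.coroutine
    def do(self, task):
        # task is assumed to be a URL string; task_to_id() returns the task itself,
        # so two enqueue() calls for the same URL share one in-flight download.
        response = yield AsyncHTTPClient().fetch(task)
        raise gen.Return(response.body)

@gen.coroutine
def crawl():
    worker = UrlDownloader(max_queue=2)   # at most two downloads run concurrently
    first = worker.enqueue("http://example.com/")
    second = worker.enqueue("http://example.com/")  # chained onto the first Future
    bodies = yield [first, second]
    raise gen.Return(bodies)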
Example #12
def extract_proxies_async(requests_proxies):
    """
    :rtype: {'http://123.169.238.33:8888', ...}
    """

    SEMA = Semaphore(CONCURRENT_NUM)

    @gen.coroutine
    def worker(instance, idx, item, requests_proxies):
        with (yield SEMA.acquire()):
            proxies = instance.parse_proxies_async(item)
            yield instance.async_http(proxies, idx, requests_proxies)

    instance = ExtractProxies.instance()
    raw_proxies = instance.get_proxies()
    if raw_proxies is not None:
        yield [worker(instance, idx, item, requests_proxies) \
            for idx, item in enumerate(raw_proxies)]
Example #13
    def __init__(self, prespawn_count, kernel_manager):
        # Make sure there's at least one kernel as a delegate
        if not prespawn_count:
            prespawn_count = 1

        super(ManagedKernelPool, self).__init__(prespawn_count, kernel_manager)

        self.kernel_clients = {}
        self.on_recv_funcs = {}
        self.kernel_pool = []

        kernel_ids = self.kernel_manager.list_kernel_ids()
        self.kernel_semaphore = Semaphore(len(kernel_ids))

        # Create clients and iopub handlers for prespawned kernels
        for kernel_id in kernel_ids:
            self.kernel_clients[kernel_id] = kernel_manager.get_kernel(kernel_id).client()
            self.kernel_pool.append(kernel_id)
            iopub = self.kernel_manager.connect_iopub(kernel_id)
            iopub.on_recv(self.create_on_reply(kernel_id))
Example #14
    def __init__(self, prespawn_count, kernel_manager):

        if prespawn_count is None:
            prespawn_count = 0

        self.kernel_clients = {}
        self.on_recv_funcs = {}
        self.kernel_manager = kernel_manager
        self.pool_index = 0
        self.kernel_pool = []
        self.kernel_semaphore = Semaphore(prespawn_count)

        for _ in range(prespawn_count):
            if self.kernel_manager.parent.seed_notebook:
                kernel_id = kernel_manager.start_kernel(kernel_name=self.kernel_manager.parent.seed_notebook['metadata']['kernelspec']['name'])
            else:
                kernel_id = kernel_manager.start_kernel()
            self.kernel_clients[kernel_id] = kernel_manager.get_kernel(kernel_id).client()
            self.kernel_pool.append(kernel_id)
            iopub = self.kernel_manager.connect_iopub(kernel_id)
            iopub.on_recv(self.create_on_reply(kernel_id))
Example #15
    def get_resized(self, gallery, photo,
            width=None, height=None, quality=60,
            rotation=0.0, img_format=None, orientation=0):
        """
        Retrieve the given photo in a resized format.
        """
        # Determine the path to the original file.
        orig_node = self._fs_node.join_node(gallery, photo)

        if img_format is None:
            # Detect from original file and quality setting.
            with magic.Magic(flags=magic.MAGIC_MIME_TYPE) as m:
                mime_type = m.id_filename(orig_node.abs_path)
                self._log.debug('%s/%s detected format %s',
                        gallery, photo, mime_type)
                if mime_type == 'image/gif':
                    img_format = ImageFormat.GIF
                else:
                    if quality == 100:
                        # Assume PNG
                        img_format = ImageFormat.PNG
                    else:
                        # Assume JPEG
                        img_format = ImageFormat.JPEG
        else:
            # Use the format given by the user
            img_format = ImageFormat(img_format)

        self._log.debug('%s/%s using %s format',
                gallery, photo, img_format.name)

        # Sanitise dimensions given by user.
        width, height = self.get_dimensions(gallery, photo, width, height)
        self._log.debug('%s/%s target dimensions %d by %d',
                gallery, photo, width, height)

        # Determine where the file would be cached
        (cache_dir, cache_name) = self._get_cache_name(gallery, photo,
                width,height, quality, rotation, img_format)

        # Do we have this file?
        data = self._read_cache(orig_node, cache_dir, cache_name)
        if data is not None:
            raise Return((img_format, cache_name, data))

        # Locate the lock for this photo.
        mutex_key = (gallery, photo, width, height, quality, rotation,
                img_format)
        try:
            mutex = self._mutexes[mutex_key]
        except KeyError:
            mutex = Semaphore(1)
            self._mutexes[mutex_key] = mutex

        resize_args = (gallery, photo, width, height, quality,
                    rotation, img_format.value, orientation)
        try:
            self._log.debug('%s/%s waiting for mutex',
                    gallery, photo)
            yield mutex.acquire()

            # We have the semaphore, call our resize routine.
            self._log.debug('%s/%s retrieving resized image (args=%s)',
                    gallery, photo, resize_args)
            (img_format, file_name, file_data) = yield self._pool.apply(
                func=self._do_resize,
                args=resize_args)
            raise Return((img_format, file_name, file_data))
        except Return:
            raise
        except:
            self._log.exception('Error resizing photo; gallery: %s, photo: %s, '\
                    'width: %d, height: %d, quality: %f, rotation: %f, format: %s',
                    gallery, photo, width, height, quality, rotation, img_format)
            raise
        finally:
            mutex.release()
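The per-photo locking idiom above (a lazily created one-permit Semaphore keyed by the resize parameters) can be isolated into a small sketch; the with_key_lock name and the module-level registry are illustrative only:

from tornado import gen
from tornado.locks import Semaphore

_mutexes = {}

@gen.coroutine
def with_key_lock(key, work):
    # Lazily create a one-permit semaphore per key so identical requests are
    # serialised while requests for different keys proceed concurrently.
    mutex = _mutexes.setdefault(key, Semaphore(1))
    yield mutex.acquire()
    try:
        result = yield gen.maybe_future(work())
        raise gen.Return(result)
    finally:
        mutex.release()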
Example #16
class ManagedKernelPool(KernelPool):
    """Spawns a pool of kernels that are treated as identical delegates for
    future requests.

    Manages access to individual kernels using a borrower/lender pattern.
    Cleans them all up when shut down.

    Parameters
    ----------
    prespawn_count
        Number of kernels to spawn immediately
    kernel_manager
        Kernel manager instance

    Attributes
    ----------
    kernel_clients : dict
        Map of kernel IDs to client instances for communicating with them
    on_recv_funcs : dict
        Map of kernel IDs to iopub callback functions
    kernel_pool : list
        List of available delegate kernel IDs
    kernel_semaphore : tornado.locks.Semaphore
        Semaphore that controls access to the kernel pool
    """
    def __init__(self, prespawn_count, kernel_manager):
        # Make sure there's at least one kernel as a delegate
        if not prespawn_count:
            prespawn_count = 1

        super(ManagedKernelPool, self).__init__(prespawn_count, kernel_manager)

        self.kernel_clients = {}
        self.on_recv_funcs = {}
        self.kernel_pool = []

        kernel_ids = self.kernel_manager.list_kernel_ids()
        self.kernel_semaphore = Semaphore(len(kernel_ids))

        # Create clients and iopub handlers for prespawned kernels
        for kernel_id in kernel_ids:
            self.kernel_clients[kernel_id] = kernel_manager.get_kernel(
                kernel_id).client()
            self.kernel_pool.append(kernel_id)
            iopub = self.kernel_manager.connect_iopub(kernel_id)
            iopub.on_recv(self.create_on_reply(kernel_id))

    @gen.coroutine
    def acquire(self):
        """Gets a kernel client and removes it from the available pool of
        clients.

        Returns
        -------
        tuple
            Kernel client instance, kernel ID
        """
        yield self.kernel_semaphore.acquire()
        kernel_id = self.kernel_pool[0]
        del self.kernel_pool[0]
        raise gen.Return((self.kernel_clients[kernel_id], kernel_id))

    def release(self, kernel_id):
        """Puts a kernel back into the pool of kernels available to handle
        requests.

        Parameters
        ----------
        kernel_id : str
            Kernel to return to the pool
        """
        self.kernel_pool.append(kernel_id)
        self.kernel_semaphore.release()

    def _on_reply(self, kernel_id, msg_list):
        """Invokes the iopub callback registered for the `kernel_id` and passes
        it a deserialized list of kernel messages.

        Parameters
        ----------
        kernel_id : str
            Kernel that sent the reply
        msg_list : list
            List of 0mq messages
        """
        idents, msg_list = self.kernel_clients[
            kernel_id].session.feed_identities(msg_list)
        msg = self.kernel_clients[kernel_id].session.deserialize(msg_list)
        self.on_recv_funcs[kernel_id](msg)

    def create_on_reply(self, kernel_id):
        """Creates an anonymous function to handle reply messages from the
        kernel.

        Parameters
        ----------
        kernel_id
            Kernel to listen to

        Returns
        -------
        function
            Callback function taking a kernel ID and 0mq message list
        """
        return lambda msg_list: self._on_reply(kernel_id, msg_list)

    def on_recv(self, kernel_id, func):
        """Registers a callback function for iopub messages from a particular
        kernel.

        This is needed to avoid having multiple callbacks per kernel client.

        Parameters
        ----------
        kernel_id
            Kernel from which to receive iopub messages
        func
            Callback function to use for kernel iopub messages
        """
        self.on_recv_funcs[kernel_id] = func

    def shutdown(self):
        """Shuts down all kernels and their clients.
        """
        for kid in self.kernel_clients:
            self.kernel_clients[kid].stop_channels()
            self.kernel_manager.shutdown_kernel(kid, now=True)

        # Any remaining kernels that were not created for our pool should be shutdown
        super(ManagedKernelPool, self).shutdown()
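A sketch of the borrow/return cycle the class docstring describes; execute_on_any_kernel is an illustrative name, and client.execute() is assumed to be the jupyter_client API for queuing code on a kernel. acquire() blocks on the semaphore whenever every prespawned kernel is lent out, and release() must always hand the kernel back.

from tornado import gen

@gen.coroutine
def execute_on_any_kernel(pool, code):
    # pool is assumed to be a ManagedKernelPool instance
    client, kernel_id = yield pool.acquire()   # blocks until a kernel is free
    try:
        msg_id = client.execute(code)          # queue code for execution on that kernel
        raise gen.Return(msg_id)
    finally:
        pool.release(kernel_id)                # return the kernel to the pool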
Example #17
#!/usr/bin/env python
#coding:utf-8


# Semaphore: a semaphore manages a counter that represents the number of release()
# calls minus the number of acquire() calls, plus an initial value. The acquire()
# method blocks if necessary until it can return without making the counter negative.
# A semaphore limits access to a shared resource; to allow two workers to hold the
# resource at the same time, the code looks like this.


from tornado import gen
from tornado.ioloop import IOLoop
from tornado.locks import Semaphore
sem = Semaphore(2)


@gen.coroutine
def worker(worker_id):
    with (yield sem.acquire()):  # acquire() returns a context manager that releases on exit
        print("Worker %d is working" % worker_id)
    # no explicit sem.release() is needed; the context manager released the semaphore on exit
    print("Worker %d is done" % worker_id)
    
#     yield sem.acquire()
#     try:
#         print('Worker %d is working' % worker_id)
# #         yield use_some_resource()
#     finally:
#         print('Worker %d is done' % worker_id)
#         sem.release()
        
@gen.coroutine
Example #18
class TornadoSubscriptionManager(SubscriptionManager):
    def __init__(self, pubnub_instance):
        self._message_queue = Queue()
        self._consumer_event = Event()
        self._subscription_lock = Semaphore(1)
        # self._current_request_key_object = None
        self._heartbeat_periodic_callback = None
        self._cancellation_event = None
        super(TornadoSubscriptionManager, self).__init__(pubnub_instance)
        self._start_worker()

    def _set_consumer_event(self):
        self._consumer_event.set()

    def _message_queue_put(self, message):
        self._message_queue.put(message)

    def _start_worker(self):
        self._consumer = TornadoSubscribeMessageWorker(self._pubnub,
                                                       self._listener_manager,
                                                       self._message_queue,
                                                       self._consumer_event)
        run = stack_context.wrap(self._consumer.run)
        self._pubnub.ioloop.spawn_callback(run)

    def reconnect(self):
        self._should_stop = False
        self._pubnub.ioloop.add_callback(self._start_subscribe_loop)
        self._register_heartbeat_timer()

    @tornado.gen.coroutine
    def _start_subscribe_loop(self):
        try:
            self._stop_subscribe_loop()

            yield self._subscription_lock.acquire()

            self._cancellation_event = Event()

            combined_channels = self._subscription_state.prepare_channel_list(True)
            combined_groups = self._subscription_state.prepare_channel_group_list(True)

            if len(combined_channels) == 0 and len(combined_groups) == 0:
                return

            envelope_future = Subscribe(self._pubnub) \
                .channels(combined_channels).channel_groups(combined_groups) \
                .timetoken(self._timetoken).region(self._region) \
                .filter_expression(self._pubnub.config.filter_expression) \
                .cancellation_event(self._cancellation_event) \
                .future()

            wi = tornado.gen.WaitIterator(
                envelope_future,
                self._cancellation_event.wait())

            while not wi.done():
                try:
                    result = yield wi.next()
                except Exception as e:
                    logger.error(e)
                    raise
                else:
                    if wi.current_future == envelope_future:
                        envelope = result
                    elif wi.current_future == self._cancellation_event.wait():
                        break

                    self._handle_endpoint_call(envelope.result, envelope.status)
                    self._start_subscribe_loop()
        except PubNubTornadoException as e:
            if e.status is not None and e.status.category == PNStatusCategory.PNTimeoutCategory:
                self._pubnub.ioloop.add_callback(self._start_subscribe_loop)
            else:
                self._listener_manager.announce_status(e.status)
        except Exception as e:
            logger.error(e)
            raise
        finally:
            self._cancellation_event.set()
            yield tornado.gen.moment
            self._cancellation_event = None
            self._subscription_lock.release()

    def _stop_subscribe_loop(self):
        if self._cancellation_event is not None:
            self._cancellation_event.set()

    def _stop_heartbeat_timer(self):
        if self._heartbeat_periodic_callback is not None:
            self._heartbeat_periodic_callback.stop()

    def _register_heartbeat_timer(self):
        super(TornadoSubscriptionManager, self)._register_heartbeat_timer()

        self._heartbeat_periodic_callback = PeriodicCallback(
            stack_context.wrap(self._perform_heartbeat_loop),
            self._pubnub.config.heartbeat_interval *
            TornadoSubscriptionManager.HEARTBEAT_INTERVAL_MULTIPLIER,
            self._pubnub.ioloop)
        self._heartbeat_periodic_callback.start()

    @tornado.gen.coroutine
    def _perform_heartbeat_loop(self):
        if self._heartbeat_call is not None:
            # TODO: cancel call
            pass

        cancellation_event = Event()
        state_payload = self._subscription_state.state_payload()
        presence_channels = self._subscription_state.prepare_channel_list(False)
        presence_groups = self._subscription_state.prepare_channel_group_list(False)

        if len(presence_channels) == 0 and len(presence_groups) == 0:
            return

        try:
            envelope = yield self._pubnub.heartbeat() \
                .channels(presence_channels) \
                .channel_groups(presence_groups) \
                .state(state_payload) \
                .cancellation_event(cancellation_event) \
                .future()

            heartbeat_verbosity = self._pubnub.config.heartbeat_notification_options
            if envelope.status.is_error:
                if heartbeat_verbosity == PNHeartbeatNotificationOptions.ALL or \
                        heartbeat_verbosity == PNHeartbeatNotificationOptions.FAILURES:
                    self._listener_manager.announce_status(envelope.status)
            else:
                if heartbeat_verbosity == PNHeartbeatNotificationOptions.ALL:
                    self._listener_manager.announce_status(envelope.status)

        except PubNubTornadoException:
            pass
            # TODO: check correctness
            # if e.status is not None and e.status.category == PNStatusCategory.PNTimeoutCategory:
            #     self._start_subscribe_loop()
            # else:
            #     self._listener_manager.announce_status(e.status)
        finally:
            cancellation_event.set()

    @tornado.gen.coroutine
    def _send_leave(self, unsubscribe_operation):
        envelope = yield Leave(self._pubnub) \
            .channels(unsubscribe_operation.channels) \
            .channel_groups(unsubscribe_operation.channel_groups).future()
        self._listener_manager.announce_status(envelope.status)
Example #19
def cull_idle(
    url,
    api_token,
    inactive_limit,
    protected_users,
    cull_users=False,
    max_age=0,
    concurrency=10,
):
    """Shutdown idle single-user servers
    If cull_users, inactive *users* will be deleted as well.
    """
    auth_header = {
        "Authorization": "token %s" % api_token,
    }
    req = HTTPRequest(
        url=url + "/users",
        headers=auth_header,
    )
    now = datetime.now(timezone.utc)
    client = AsyncHTTPClient()

    if concurrency:
        semaphore = Semaphore(concurrency)

        @coroutine
        def fetch(req):
            """client.fetch wrapped in a semaphore to limit concurrency"""
            yield semaphore.acquire()
            try:
                return (yield client.fetch(req))
            finally:
                yield semaphore.release()

    else:
        fetch = client.fetch

    resp = yield fetch(req)
    users = json.loads(resp.body.decode("utf8", "replace"))
    futures = []

    @coroutine
    def handle_server(user, server_name, server):
        """Handle (maybe) culling a single server
        Returns True if server is now stopped (user removable),
        False otherwise.
        """
        log_name = user["name"]
        if server_name:
            log_name = "%s/%s" % (user["name"], server_name)
        if server.get("pending"):
            app_log.warning("Not culling server %s with pending %s", log_name,
                            server["pending"])
            return False

        # jupyterhub < 0.9 defined 'server.url' once the server was ready
        # as an *implicit* signal that the server was ready.
        # 0.9 adds a dedicated, explicit 'ready' field.
        # By current (0.9) definitions, servers that have no pending
        # events and are not ready shouldn't be in the model,
        # but let's check just to be safe.

        if not server.get("ready", bool(server["url"])):
            app_log.warning("Not culling not-ready not-pending server %s: %s",
                            log_name, server)
            return False

        if server.get("started"):
            age = now - parse_date(server["started"])
        else:
            # started may be undefined on jupyterhub < 0.9
            age = None

        # check last activity
        # last_activity can be None in 0.9
        if server["last_activity"]:
            inactive = now - parse_date(server["last_activity"])
        else:
            # no activity yet, use start date
            # last_activity may be None with jupyterhub 0.9,
            # which introduces the 'started' field which is never None
            # for running servers
            inactive = age

        should_cull = (inactive is not None
                       and inactive.total_seconds() >= inactive_limit)
        if should_cull:
            app_log.info("Culling server %s (inactive for %s)", log_name,
                         format_td(inactive))

        if max_age and not should_cull:
            # only check started if max_age is specified
            # so that we can still be compatible with jupyterhub 0.8
            # which doesn't define the 'started' field
            if age is not None and age.total_seconds() >= max_age:
                app_log.info(
                    "Culling server %s (age: %s, inactive for %s)",
                    log_name,
                    format_td(age),
                    format_td(inactive),
                )
                should_cull = True

        if not should_cull:
            app_log.debug(
                "Not culling server %s (age: %s, inactive for %s)",
                log_name,
                format_td(age),
                format_td(inactive),
            )
            return False

        if server_name:
            # culling a named server
            delete_url = url + "/users/%s/servers/%s" % (
                quote(user["name"]),
                quote(server["name"]),
            )
        else:
            delete_url = url + "/users/%s/server" % quote(user["name"])

        req = HTTPRequest(
            url=delete_url,
            method="DELETE",
            headers=auth_header,
        )
        resp = yield fetch(req)
        if resp.code == 202:
            app_log.warning(
                "Server %s is slow to stop",
                log_name,
            )
            # return False to prevent culling user with pending shutdowns
            return False
        return True

    @coroutine
    def handle_user(user):
        """Handle one user.
        Create a list of their servers, and async exec them.  Wait for
        that to be done, and if all servers are stopped, possibly cull
        the user.
        """
        # shutdown servers first.
        # Hub doesn't allow deleting users with running servers.
        # jupyterhub 0.9 always provides a 'servers' model.
        # 0.8 only does this when named servers are enabled.
        if "servers" in user:
            servers = user["servers"]
        else:
            # jupyterhub < 0.9 without named servers enabled.
            # create servers dict with one entry for the default server
            # from the user model.
            # only if the server is running.
            servers = {}
            if user["server"]:
                servers[""] = {
                    "last_activity": user["last_activity"],
                    "pending": user["pending"],
                    "url": user["server"],
                }
        server_futures = [
            handle_server(user, server_name, server)
            for server_name, server in servers.items()
        ]
        results = yield multi(server_futures)
        if not cull_users:
            return
        # some servers are still running, cannot cull users
        still_alive = len(results) - sum(results)
        if still_alive:
            app_log.debug(
                "Not culling user %s with %i servers still alive",
                user["name"],
                still_alive,
            )
            return False

        should_cull = False
        if user.get("created"):
            age = now - parse_date(user["created"])
        else:
            # created may be undefined on jupyterhub < 0.9
            age = None

        # check last activity
        # last_activity can be None in 0.9
        if user["last_activity"]:
            inactive = now - parse_date(user["last_activity"])
        else:
            # no activity yet, use start date
            # last_activity may be None with jupyterhub 0.9,
            # which introduces the 'created' field which is never None
            inactive = age

        should_cull = (inactive is not None
                       and inactive.total_seconds() >= inactive_limit)
        if should_cull:
            app_log.info("Culling user %s (inactive for %s)", user["name"],
                         inactive)

        if max_age and not should_cull:
            # only check created if max_age is specified
            # so that we can still be compatible with jupyterhub 0.8
            # which doesn't define the 'started' field
            if age is not None and age.total_seconds() >= max_age:
                app_log.info(
                    "Culling user %s (age: %s, inactive for %s)",
                    user["name"],
                    format_td(age),
                    format_td(inactive),
                )
                should_cull = True

        if not should_cull:
            app_log.debug(
                "Not culling user %s (created: %s, last active: %s)",
                user["name"],
                format_td(age),
                format_td(inactive),
            )
            return False

        req = HTTPRequest(
            url=url + "/users/%s" % user["name"],
            method="DELETE",
            headers=auth_header,
        )
        yield fetch(req)
        return True

    p_users = []
    # Don't kill protected servers
    if protected_users is not None:
        # Expects a list of eithers users or files that contains users
        for protected in protected_users.split(","):
            if os.path.exists(protected):
                db_path = protected
                try:
                    app_log.info("Cull, checking db {} "
                                 "for users".format(db_path))
                    file_users = []
                    with open(db_path, "r") as db:
                        file_users = [
                            user.rstrip("\n").rstrip("\r\n") for user in db
                        ]
                    p_users.extend(file_users)
                except IOError as err:
                    app_log.error("Cull, tried to open db file {},"
                                  "Failed {}".format(db_path, err))
            else:
                p_users.append(protected)
        users = [user for user in users if user["name"] not in p_users]

    for user in users:
        futures.append((user["name"], handle_user(user)))

    for (name, f) in futures:
        try:
            result = yield f
        except Exception:
            app_log.exception("Error processing %s", name)
        else:
            if result:
                app_log.debug("Finished culling %s", name)
Example #20
    def __init__(self, max_queue=0, io_loop=None):
        self.io_loop = io_loop or IOLoop.current()
        self.active = {}
        self.sem = Semaphore(value=max_queue)
        self.log = logging.getLogger("pizzadelivery.downloader")
Example #21
def cull_idle(url,
              api_token,
              inactive_limit,
              cull_users=False,
              max_age=0,
              concurrency=10):
    """Shutdown idle single-user servers

    If cull_users, inactive *users* will be deleted as well.
    """
    auth_header = {'Authorization': 'token %s' % api_token}
    req = HTTPRequest(url=url + '/users', headers=auth_header)
    now = datetime.now(timezone.utc)
    client = AsyncHTTPClient()

    if concurrency:
        semaphore = Semaphore(concurrency)

        @coroutine
        def fetch(req):
            """client.fetch wrapped in a semaphore to limit concurrency"""
            yield semaphore.acquire()
            try:
                return (yield client.fetch(req))
            finally:
                yield semaphore.release()

    else:
        fetch = client.fetch

    resp = yield fetch(req)
    users = json.loads(resp.body.decode('utf8', 'replace'))
    futures = []

    @coroutine
    def handle_server(user, server_name, server):
        """Handle (maybe) culling a single server

        Returns True if server is now stopped (user removable),
        False otherwise.
        """
        log_name = user['name']
        if server_name:
            log_name = '%s/%s' % (user['name'], server_name)
        if server.get('pending'):
            app_log.warning("Not culling server %s with pending %s", log_name,
                            server['pending'])
            return False

        # jupyterhub < 0.9 defined 'server.url' once the server was ready
        # as an *implicit* signal that the server was ready.
        # 0.9 adds a dedicated, explicit 'ready' field.
        # By current (0.9) definitions, servers that have no pending
        # events and are not ready shouldn't be in the model,
        # but let's check just to be safe.

        if not server.get('ready', bool(server['url'])):
            app_log.warning("Not culling not-ready not-pending server %s: %s",
                            log_name, server)
            return False

        if server.get('started'):
            age = now - parse_date(server['started'])
        else:
            # started may be undefined on jupyterhub < 0.9
            age = None

        # check last activity
        # last_activity can be None in 0.9
        if server['last_activity']:
            inactive = now - parse_date(server['last_activity'])
        else:
            # no activity yet, use start date
            # last_activity may be None with jupyterhub 0.9,
            # which introduces the 'started' field which is never None
            # for running servers
            inactive = age

        should_cull = (inactive is not None
                       and inactive.total_seconds() >= inactive_limit)
        if should_cull:
            app_log.info("Culling server %s (inactive for %s)", log_name,
                         format_td(inactive))

        if max_age and not should_cull:
            # only check started if max_age is specified
            # so that we can still be compatible with jupyterhub 0.8
            # which doesn't define the 'started' field
            if age is not None and age.total_seconds() >= max_age:
                app_log.info(
                    "Culling server %s (age: %s, inactive for %s)",
                    log_name,
                    format_td(age),
                    format_td(inactive),
                )
                should_cull = True

        if not should_cull:
            app_log.debug(
                "Not culling server %s (age: %s, inactive for %s)",
                log_name,
                format_td(age),
                format_td(inactive),
            )
            return False

        if server_name:
            # culling a named server
            delete_url = url + "/users/%s/servers/%s" % (
                quote(user['name']),
                quote(server['name']),
            )
        else:
            delete_url = url + '/users/%s/server' % quote(user['name'])

        req = HTTPRequest(url=delete_url, method='DELETE', headers=auth_header)
        resp = yield fetch(req)
        if resp.code == 202:
            app_log.warning("Server %s is slow to stop", log_name)
            # return False to prevent culling user with pending shutdowns
            return False
        return True

    @coroutine
    def handle_user(user):
        """Handle one user.

        Create a list of their servers, and async exec them.  Wait for
        that to be done, and if all servers are stopped, possibly cull
        the user.
        """
        # shutdown servers first.
        # Hub doesn't allow deleting users with running servers.
        # jupyterhub 0.9 always provides a 'servers' model.
        # 0.8 only does this when named servers are enabled.
        if 'servers' in user:
            servers = user['servers']
        else:
            # jupyterhub < 0.9 without named servers enabled.
            # create servers dict with one entry for the default server
            # from the user model.
            # only if the server is running.
            servers = {}
            if user['server']:
                servers[''] = {
                    'last_activity': user['last_activity'],
                    'pending': user['pending'],
                    'url': user['server'],
                }
        server_futures = [
            handle_server(user, server_name, server)
            for server_name, server in servers.items()
        ]
        results = yield multi(server_futures)
        if not cull_users:
            return
        # some servers are still running, cannot cull users
        still_alive = len(results) - sum(results)
        if still_alive:
            app_log.debug(
                "Not culling user %s with %i servers still alive",
                user['name'],
                still_alive,
            )
            return False

        should_cull = False
        if user.get('created'):
            age = now - parse_date(user['created'])
        else:
            # created may be undefined on jupyterhub < 0.9
            age = None

        # check last activity
        # last_activity can be None in 0.9
        if user['last_activity']:
            inactive = now - parse_date(user['last_activity'])
        else:
            # no activity yet, use start date
            # last_activity may be None with jupyterhub 0.9,
            # which introduces the 'created' field which is never None
            inactive = age

        should_cull = (inactive is not None
                       and inactive.total_seconds() >= inactive_limit)
        if should_cull:
            app_log.info("Culling user %s (inactive for %s)", user['name'],
                         inactive)

        if max_age and not should_cull:
            # only check created if max_age is specified
            # so that we can still be compatible with jupyterhub 0.8
            # which doesn't define the 'started' field
            if age is not None and age.total_seconds() >= max_age:
                app_log.info(
                    "Culling user %s (age: %s, inactive for %s)",
                    user['name'],
                    format_td(age),
                    format_td(inactive),
                )
                should_cull = True

        if not should_cull:
            app_log.debug(
                "Not culling user %s (created: %s, last active: %s)",
                user['name'],
                format_td(age),
                format_td(inactive),
            )
            return False

        req = HTTPRequest(url=url + '/users/%s' % user['name'],
                          method='DELETE',
                          headers=auth_header)
        yield fetch(req)
        return True

    for user in users:
        futures.append((user['name'], handle_user(user)))

    for (name, f) in futures:
        try:
            result = yield f
        except Exception:
            app_log.exception("Error processing %s", name)
        else:
            if result:
                app_log.debug("Finished culling %s", name)
Example #22
class ManagedKernelPool(KernelPool):
    '''
    Spawns a pool of kernels. Manages access to individual kernels using a
    borrower/lender pattern. Cleans them all up when shut down.
    '''
    def __init__(self, prespawn_count, kernel_manager):
        # Make sure there's at least one kernel as a delegate
        if not prespawn_count:
            prespawn_count = 1

        super(ManagedKernelPool, self).__init__(prespawn_count, kernel_manager)

        self.kernel_clients = {}
        self.on_recv_funcs = {}
        self.pool_index = 0
        self.kernel_pool = []

        kernel_ids = self.kernel_manager.list_kernel_ids()
        self.kernel_semaphore = Semaphore(len(kernel_ids))

        # Connect to any prespawned kernels
        for kernel_id in kernel_ids:
            self.kernel_clients[kernel_id] = kernel_manager.get_kernel(kernel_id).client()
            self.kernel_pool.append(kernel_id)
            iopub = self.kernel_manager.connect_iopub(kernel_id)
            iopub.on_recv(self.create_on_reply(kernel_id))

    @gen.coroutine
    def acquire(self):
        '''
        Returns a kernel client and id for use and removes the kernel from the resource pool.
        Kernels must be returned via the release method.
        :return: Returns a kernel client and a kernel id
        '''
        yield self.kernel_semaphore.acquire()
        kernel_id = self.kernel_pool[0]
        del self.kernel_pool[0]
        raise gen.Return((self.kernel_clients[kernel_id], kernel_id))

    def release(self, kernel_id):
        '''
        Returns a kernel back to the resource pool.
        :param kernel_id: Id of the kernel to return to the pool
        '''
        self.kernel_pool.append(kernel_id)
        self.kernel_semaphore.release()

    def _on_reply(self, kernel_id, msg_list):
        idents, msg_list = self.kernel_clients[kernel_id].session.feed_identities(msg_list)
        msg = self.kernel_clients[kernel_id].session.deserialize(msg_list)
        self.on_recv_funcs[kernel_id](msg)

    def create_on_reply(self, kernel_id):
        '''
        The lambda is used to handle a specific reply per kernel and provide a unique stack scope per invocation.
        '''
        return lambda msg_list: self._on_reply(kernel_id, msg_list)

    def on_recv(self, kernel_id, func):
        '''
        Registers a callback for io_pub messages for a particular kernel.
        This is needed to avoid having multiple callbacks per kernel client.
        :param kernel_id: Id of the kernel
        :param func: Callback function to handle the message
        '''
        self.on_recv_funcs[kernel_id] = func

    def shutdown(self):
        '''
        Shuts down all kernels in the pool and in the kernel manager.
        '''
        for kid in self.kernel_clients:
            self.kernel_clients[kid].stop_channels()
            self.kernel_manager.shutdown_kernel(kid, now=True)

        # Any remaining kernels that were not created for our pool should be shutdown
        super(ManagedKernelPool, self).shutdown()
Example #23
    class TornadoTransmission():
        def __init__(self, max_concurrent_batches=10, block_on_send=False,
                    block_on_response=False, max_batch_size=100, send_frequency=0.25,
                    user_agent_addition=''):
            if not has_tornado:
                raise ImportError('TornadoTransmission requires tornado, but it was not found.')

            self.block_on_send = block_on_send
            self.block_on_response = block_on_response
            self.max_batch_size = max_batch_size
            self.send_frequency = send_frequency

            user_agent = "libhoney-py/" + VERSION
            if user_agent_addition:
                user_agent += " " + user_agent_addition

            self.http_client = AsyncHTTPClient(
                force_instance=True,
                defaults=dict(user_agent=user_agent))

            # libhoney adds events to the pending queue for us to send
            self.pending = Queue(maxsize=1000)
            # we hand back responses from the API on the responses queue
            self.responses = Queue(maxsize=2000)

            self.batch_data = {}
            self.sd = statsd.StatsClient(prefix="libhoney")
            self.batch_sem = Semaphore(max_concurrent_batches)

        def start(self):
            ioloop.IOLoop.current().spawn_callback(self._sender)

        def send(self, ev):
            '''send accepts an event and queues it to be sent'''
            self.sd.gauge("queue_length", self.pending.qsize())
            try:
                if self.block_on_send:
                    self.pending.put(ev)
                else:
                    self.pending.put_nowait(ev)
                self.sd.incr("messages_queued")
            except QueueFull:
                response = {
                    "status_code": 0,
                    "duration": 0,
                    "metadata": ev.metadata,
                    "body": "",
                    "error": "event dropped; queue overflow",
                }
                if self.block_on_response:
                    self.responses.put(response)
                else:
                    try:
                        self.responses.put_nowait(response)
                    except QueueFull:
                        # if the response queue is full when trying to add an event
                        # queue is full response, just skip it.
                        pass
                self.sd.incr("queue_overflow")

        # We're using the older decorator/yield model for compatibility with
        # Python versions before 3.5.
        # See: http://www.tornadoweb.org/en/stable/guide/coroutines.html#python-3-5-async-and-await
        @gen.coroutine
        def _sender(self):
            '''_sender is the control loop that pulls events off the `self.pending`
            queue and submits batches for actual sending. '''
            events = []
            last_flush = time.time()
            while True:
                try:
                    ev = yield self.pending.get(timeout=self.send_frequency)
                    if ev is None:
                        # signals shutdown
                        yield self._flush(events)
                        return
                    events.append(ev)
                    if (len(events) > self.max_batch_size or
                        time.time() - last_flush > self.send_frequency):
                        yield self._flush(events)
                        events = []
                except TimeoutError:
                    yield self._flush(events)
                    events = []
                    last_flush = time.time()

        @gen.coroutine
        def _flush(self, events):
            if not events:
                return
            for dest, group in group_events_by_destination(events).items():
                yield self._send_batch(dest, group)

        @gen.coroutine
        def _send_batch(self, destination, events):
            ''' Makes a single batch API request with the given list of events. The
            `destination` argument contains the write key, API host and dataset
            name used to build the request.'''
            start = time.time()
            status_code = 0

            try:
                # enforce max_concurrent_batches
                yield self.batch_sem.acquire()
                url = urljoin(urljoin(destination.api_host, "/1/batch/"),
                            destination.dataset)
                payload = []
                for ev in events:
                    event_time = ev.created_at.isoformat()
                    if ev.created_at.tzinfo is None:
                        event_time += "Z"
                    payload.append({
                        "time": event_time,
                        "samplerate": ev.sample_rate,
                        "data": ev.fields()})
                req = HTTPRequest(
                    url,
                    method='POST',
                    headers={
                        "X-Honeycomb-Team": destination.writekey,
                        "Content-Type": "application/json",
                    },
                    body=json.dumps(payload, default=json_default_handler),
                )
                self.http_client.fetch(req, self._response_callback)
                # store the events that were sent so we can process responses later
                # it is important that we delete these eventually, or we'll run into memory issues
                self.batch_data[req] = {"start": start, "events": events}
            except Exception as e:
                # Catch all exceptions and hand them to the responses queue.
                self._enqueue_errors(status_code, e, start, events)
            finally:
                self.batch_sem.release()

        def _enqueue_errors(self, status_code, error, start, events):
            for ev in events:
                self.sd.incr("send_errors")
                self._enqueue_response(status_code, "", error, start, ev.metadata)

        def _response_callback(self, resp):
            # resp.request should be the same HTTPRequest object built by _send_batch
            # and mapped to values in batch_data
            events = self.batch_data[resp.request]["events"]
            start  = self.batch_data[resp.request]["start"]
            try:
                status_code = resp.code
                resp.rethrow()

                statuses = [d["status"] for d in json.loads(resp.body)]
                for ev, status in zip(events, statuses):
                    self._enqueue_response(status, "", None, start, ev.metadata)
                    self.sd.incr("messages_sent")
            except Exception as e:
                self._enqueue_errors(status_code, e, start, events)
                self.sd.incr("send_errors")
            finally:
                # clean up the data for this batch
                del self.batch_data[resp.request]

        def _enqueue_response(self, status_code, body, error, start, metadata):
            resp = {
                "status_code": status_code,
                "body": body,
                "error": error,
                "duration": (time.time() - start) * 1000,
                "metadata": metadata
            }
            if self.block_on_response:
                self.responses.put(resp)
            else:
                try:
                    self.responses.put_nowait(resp)
                except QueueFull:
                    pass

        def close(self):
            '''call close to send all in-flight requests and shut down the
                senders nicely. Times out after max 20 seconds per sending thread
                plus 10 seconds for the response queue'''
            try:
                self.pending.put(None, 10)
            except QueueFull:
                pass
            # signal to the responses queue that nothing more is coming.
            try:
                self.responses.put(None, 10)
            except QueueFull:
                pass

        def get_response_queue(self):
            ''' return the responses queue onto which the response object for
            each event send is placed'''
            return self.responses
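
# A small caller-side sketch of draining the queue returned by
# get_response_queue() above; the None entry put by close() marks the end of
# the stream (illustrative only, reusing the tornado.gen import of this file):
@gen.coroutine
def drain_responses(transmission):
    responses = transmission.get_response_queue()
    while True:
        resp = yield responses.get()
        if resp is None:                   # close() signalled shutdown
            return
        print(resp["status_code"], resp["error"], resp["metadata"])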
Ejemplo n.º 24
0
class KernelPool(object):
    '''
    A class to maintain a pool of kernel and control access to the individual kernels.
    Kernels are protected by a borrower/lender pattern.
    '''
    def __init__(self, prespawn_count, kernel_manager):

        if prespawn_count is None:
            prespawn_count = 0

        self.kernel_clients = {}
        self.on_recv_funcs = {}
        self.kernel_manager = kernel_manager
        self.pool_index = 0
        self.kernel_pool = []
        self.kernel_semaphore = Semaphore(prespawn_count)

        for _ in range(prespawn_count):
            if self.kernel_manager.parent.seed_notebook:
                kernel_id = kernel_manager.start_kernel(kernel_name=self.kernel_manager.parent.seed_notebook['metadata']['kernelspec']['name'])
            else:
                kernel_id = kernel_manager.start_kernel()
            self.kernel_clients[kernel_id] = kernel_manager.get_kernel(kernel_id).client()
            self.kernel_pool.append(kernel_id)
            iopub = self.kernel_manager.connect_iopub(kernel_id)
            iopub.on_recv(self.create_on_reply(kernel_id))

    @gen.coroutine
    def acquire(self):
        '''
        Returns a kernel client and id for use and removes the kernel from the resource pool.
        Kernels must be returned via the release method.
        :return: Returns a kernel client and a kernel id
        '''
        yield self.kernel_semaphore.acquire()
        kernel_id = self.kernel_pool[0]
        del self.kernel_pool[0]
        raise gen.Return((self.kernel_clients[kernel_id], kernel_id))

    def release(self, kernel_id):
        '''
        Returns a kernel back to the resource pool.
        :param kernel_id: Id of the kernel to return to the pool
        '''
        self.kernel_pool.append(kernel_id)
        self.kernel_semaphore.release()

    def _on_reply(self, kernel_id, msg_list):
        idents, msg_list = self.kernel_clients[kernel_id].session.feed_identities(msg_list)
        msg = self.kernel_clients[kernel_id].session.deserialize(msg_list)
        self.on_recv_funcs[kernel_id](msg)

    def create_on_reply(self, kernel_id):
        '''
        The lambda is used to handle a specific reply per kernel and provide a unique stack scope per invocation.
        '''
        return lambda msg_list: self._on_reply(kernel_id, msg_list)

    def on_recv(self, kernel_id, func):
        '''
        Registers a callback for io_pub messages for a particular kernel.
        This is needed to avoid having multiple callbacks per kernel client.
        :param kernel_id: Id of the kernel
        :param func: Callback function to handle the message
        '''
        self.on_recv_funcs[kernel_id] = func

    def shutdown(self):
        '''
        Shuts down all kernels in the pool and in the kernel manager.
        '''
        for kid in self.kernel_clients:
            self.kernel_clients[kid].stop_channels()
            self.kernel_manager.shutdown_kernel(kid, now=True)

        # Any remaining kernels that were not created for our pool should be shutdown
        kids = self.kernel_manager.list_kernel_ids()
        for kid in kids:
            self.kernel_manager.shutdown_kernel(kid, now=True)
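
# A minimal caller-side sketch of borrowing from the pool above (illustrative
# only; `client.execute` is the standard jupyter_client call and `code` is a
# string of code to run on the borrowed kernel):
@gen.coroutine
def run_on_pooled_kernel(pool, code):
    client, kernel_id = yield pool.acquire()
    try:
        msg_id = client.execute(code)      # queue the code for execution on the kernel
        raise gen.Return(msg_id)
    finally:
        pool.release(kernel_id)            # returns the kernel and releases the semaphore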
Ejemplo n.º 25
0
        try:
            yield func(self, *args, **kwargs)
        except Exception as e:
            self.logger.error(traceback.format_exc())
            if self.request.headers.get("Accept",
                                        "").startswith("application/json"):
                self.send_json_error()
            else:
                self.write_error(500)
            return

    return wrapper


base_cache = BaseRedis()
sem = Semaphore(1)


def cache(prefix=None, key=None, ttl=60, hash=True, lock=True, separator=":"):
    """
    cache装饰器

    :param prefix: 指定prefix
    :param key: 指定key
    :param ttl: ttl (s)
    :param hash: 是否需要hash
    :param lock: -
    :param separator: key 分隔符
    :return:
    """
    key_ = key
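
# A hedged sketch of how a decorator like this is typically applied (the body
# above is truncated, so the exact key construction is assumed; the wrapped
# coroutine and fetch_profile_from_db below are hypothetical):
from tornado import gen

@cache(prefix="user", key="profile", ttl=120, lock=True)
@gen.coroutine
def get_user_profile(user_id):
    profile = yield fetch_profile_from_db(user_id)   # hypothetical data-access helper
    raise gen.Return(profile)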
Ejemplo n.º 26
0
import nfs
import os
import logging
import time
from tornado import web, gen
from tornado.locks import Semaphore
from tornado.httpclient import AsyncHTTPClient
from framework import settings
from framework.config import config

MAX_BODY_SIZE = 4 * 1024.0 * 1024.0 * 1024.0  # 4GB
GMT_FORMAT = '%a, %d %b %Y %H:%M:%S GMT'
AsyncHTTPClient.configure(None, max_body_size=MAX_BODY_SIZE)

logger = logging.getLogger('default')
semaphore = Semaphore(config.get('file_service_semaphore', 5))


class FileHandler(web.RequestHandler):
    @gen.coroutine
    def get(self):
        self.file_name = self.get_argument('filename')  # type: str
        self.space_dir = nfs.join(settings.REPO_DIR,
                                  settings.REPO_ANT_SPACENAME)
        if not nfs.exists(self.space_dir):
            nfs.makedirs(self.space_dir)
        self.file_path = nfs.join(self.space_dir, self.file_name)
        lock_file_name = nfs.extsep + self.file_name + nfs.extsep + 'lock'
        self.lock_file = nfs.join(self.space_dir, lock_file_name)
        logger.info('#%d Request file: %s', id(self.request), self.file_name)
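
# A hedged sketch of the download step that presumably follows the truncated
# handler above: the module-level `semaphore` caps concurrent fetches at
# config.get('file_service_semaphore', 5) (URL and helper name are assumptions):
@gen.coroutine
def fetch_file_limited(url):
    yield semaphore.acquire()
    try:
        client = AsyncHTTPClient()         # max_body_size was configured at import time above
        response = yield client.fetch(url, request_timeout=3600)
        raise gen.Return(response)
    finally:
        semaphore.release()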
Ejemplo n.º 27
0
class TornadoSubscriptionManager(SubscriptionManager):
    def __init__(self, pubnub_instance):

        subscription_manager = self

        self._message_queue = Queue()
        self._consumer_event = Event()
        self._cancellation_event = Event()
        self._subscription_lock = Semaphore(1)
        # self._current_request_key_object = None
        self._heartbeat_periodic_callback = None
        self._reconnection_manager = TornadoReconnectionManager(pubnub_instance)

        super(TornadoSubscriptionManager, self).__init__(pubnub_instance)
        self._start_worker()

        class TornadoReconnectionCallback(ReconnectionCallback):
            def on_reconnect(self):
                subscription_manager.reconnect()

                pn_status = PNStatus()
                pn_status.category = PNStatusCategory.PNReconnectedCategory
                pn_status.error = False

                subscription_manager._subscription_status_announced = True
                subscription_manager._listener_manager.announce_status(pn_status)

        self._reconnection_listener = TornadoReconnectionCallback()
        self._reconnection_manager.set_reconnection_listener(self._reconnection_listener)

    def _set_consumer_event(self):
        self._consumer_event.set()

    def _message_queue_put(self, message):
        self._message_queue.put(message)

    def _start_worker(self):
        self._consumer = TornadoSubscribeMessageWorker(self._pubnub,
                                                       self._listener_manager,
                                                       self._message_queue,
                                                       self._consumer_event)
        run = stack_context.wrap(self._consumer.run)
        self._pubnub.ioloop.spawn_callback(run)

    def reconnect(self):
        self._should_stop = False
        self._pubnub.ioloop.spawn_callback(self._start_subscribe_loop)
        # self._register_heartbeat_timer()

    def disconnect(self):
        self._should_stop = True
        self._stop_heartbeat_timer()
        self._stop_subscribe_loop()

    @tornado.gen.coroutine
    def _start_subscribe_loop(self):
        self._stop_subscribe_loop()

        yield self._subscription_lock.acquire()

        self._cancellation_event.clear()

        combined_channels = self._subscription_state.prepare_channel_list(True)
        combined_groups = self._subscription_state.prepare_channel_group_list(True)

        if len(combined_channels) == 0 and len(combined_groups) == 0:
            return

        envelope_future = Subscribe(self._pubnub) \
            .channels(combined_channels).channel_groups(combined_groups) \
            .timetoken(self._timetoken).region(self._region) \
            .filter_expression(self._pubnub.config.filter_expression) \
            .cancellation_event(self._cancellation_event) \
            .future()

        canceller_future = self._cancellation_event.wait()

        wi = tornado.gen.WaitIterator(envelope_future, canceller_future)

        # iterates twice: once for the result, once for cancellation
        while not wi.done():
            try:
                result = yield wi.next()
            except Exception as e:
                # TODO: verify the error will not be eaten
                logger.error(e)
                raise
            else:
                if wi.current_future == envelope_future:
                    e = result
                elif wi.current_future == canceller_future:
                    return
                else:
                    raise Exception("Unexpected future resolved: %s" % str(wi.current_future))

                if e.is_error():
                    # a 599 status alone isn't conclusive - tornado uses this code
                    # for a wide range of errors, for example:
                    # HTTP Server Error (599): [Errno -2] Name or service not known
                    if e.status is not None and e.status.category == PNStatusCategory.PNTimeoutCategory:
                        self._pubnub.ioloop.spawn_callback(self._start_subscribe_loop)
                        return

                    logger.error("Exception in subscribe loop: %s" % str(e))

                    if e.status is not None and e.status.category == PNStatusCategory.PNAccessDeniedCategory:
                        e.status.operation = PNOperationType.PNUnsubscribeOperation

                    self._listener_manager.announce_status(e.status)

                    self._reconnection_manager.start_polling()
                    self.disconnect()
                    return
                else:
                    self._handle_endpoint_call(e.result, e.status)

                    self._pubnub.ioloop.spawn_callback(self._start_subscribe_loop)

            finally:
                self._cancellation_event.set()
                yield tornado.gen.moment
                self._subscription_lock.release()
                self._cancellation_event.clear()
                break

    def _stop_subscribe_loop(self):
        if self._cancellation_event is not None and not self._cancellation_event.is_set():
            self._cancellation_event.set()

    def _stop_heartbeat_timer(self):
        if self._heartbeat_periodic_callback is not None:
            self._heartbeat_periodic_callback.stop()

    def _register_heartbeat_timer(self):
        super(TornadoSubscriptionManager, self)._register_heartbeat_timer()
        self._heartbeat_periodic_callback = PeriodicCallback(
            stack_context.wrap(self._perform_heartbeat_loop),
            self._pubnub.config.heartbeat_interval * TornadoSubscriptionManager.HEARTBEAT_INTERVAL_MULTIPLIER,
            self._pubnub.ioloop)
        self._heartbeat_periodic_callback.start()

    @tornado.gen.coroutine
    def _perform_heartbeat_loop(self):
        if self._heartbeat_call is not None:
            # TODO: cancel call
            pass

        cancellation_event = Event()
        state_payload = self._subscription_state.state_payload()
        presence_channels = self._subscription_state.prepare_channel_list(False)
        presence_groups = self._subscription_state.prepare_channel_group_list(False)

        if len(presence_channels) == 0 and len(presence_groups) == 0:
            return

        try:
            envelope = yield self._pubnub.heartbeat() \
                .channels(presence_channels) \
                .channel_groups(presence_groups) \
                .state(state_payload) \
                .cancellation_event(cancellation_event) \
                .future()

            heartbeat_verbosity = self._pubnub.config.heartbeat_notification_options
            if envelope.status.is_error:
                if heartbeat_verbosity == PNHeartbeatNotificationOptions.ALL or \
                        heartbeat_verbosity == PNHeartbeatNotificationOptions.FAILURES:
                    self._listener_manager.announce_status(envelope.status)
            else:
                if heartbeat_verbosity == PNHeartbeatNotificationOptions.ALL:
                    self._listener_manager.announce_status(envelope.status)

        except PubNubTornadoException:
            pass
            # TODO: check correctness
            # if e.status is not None and e.status.category == PNStatusCategory.PNTimeoutCategory:
            #     self._start_subscribe_loop()
            # else:
            #     self._listener_manager.announce_status(e.status)
        except Exception as e:
            print(e)
        finally:
            cancellation_event.set()

    @tornado.gen.coroutine
    def _send_leave(self, unsubscribe_operation):
        envelope = yield Leave(self._pubnub) \
            .channels(unsubscribe_operation.channels) \
            .channel_groups(unsubscribe_operation.channel_groups).future()
        self._listener_manager.announce_status(envelope.status)
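
# A standalone sketch of the race pattern used in _start_subscribe_loop above:
# wait on a work future and a cancellation event at the same time and act on
# whichever completes first (illustrative only, not PubNub-specific code):
@tornado.gen.coroutine
def race(work_future, cancellation_event):
    wait_iterator = tornado.gen.WaitIterator(work_future, cancellation_event.wait())
    while not wait_iterator.done():
        result = yield wait_iterator.next()
        if wait_iterator.current_future is work_future:
            raise tornado.gen.Return(result)   # the request won the race
        raise tornado.gen.Return(None)         # cancelled before the request completed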
Ejemplo n.º 28
0
class HackadayAPI(object):
    """
    Core Hackaday.io API handler.
    """

    HAD_API_URI='https://api.hackaday.io/v1'
    HAD_AUTH_URI='https://hackaday.io/authorize'\
            '?client_id=%(CLIENT_ID)s'\
            '&response_type=code'
    HAD_TOKEN_URI='https://auth.hackaday.io/access_token'\
            '?client_id=%(CLIENT_ID)s'\
            '&client_secret=%(CLIENT_SECRET)s'\
            '&code=%(CODE)s'\
            '&grant_type=authorization_code'

    # Rate limiting
    RQLIM_TIME=30  # seconds

    def __init__(self, client_id, client_secret, api_key,
            api_uri=HAD_API_URI, auth_uri=HAD_AUTH_URI,
            token_uri=HAD_TOKEN_URI, rqlim_time=RQLIM_TIME,
            client=None, log=None, io_loop=None):

        if log is None:
            log = extdlog.getLogger(self.__class__.__module__)

        if io_loop is None:
            io_loop = IOLoop.current()

        if client is None:
            client = AsyncHTTPClient()

        self._client = client
        self._io_loop = io_loop
        self._log = log
        self._client_id = client_id
        self._client_secret = client_secret
        self._api_key = api_key
        self._api_uri = api_uri
        self._auth_uri = auth_uri
        self._token_uri = token_uri

        # Timestamps of last rqlim_num requests
        self._last_rq = 0.0
        self._rqlim_time = rqlim_time

        # Semaphore to limit concurrent access
        self._rq_sem = Semaphore(1)

        # If None, then no "forbidden" status is current.
        # Otherwise, this stores when the "forbidden" flag expires.
        self._forbidden_expiry = None

    @property
    def is_forbidden(self):
        """
        Return true if the last request returned a "forbidden" response
        code and was made within the last hour.
        """
        if self._forbidden_expiry is None:
            return False

        return self._forbidden_expiry > self._io_loop.time()

    @coroutine
    def _ratelimit_sleep(self):
        """
        Ensure we don't exceed the rate limit by tracking the request
        timestamps and adding a sleep if required.
        """
        now = self._io_loop.time()

        # Figure out if we need to wait before the next request
        delay = (self._last_rq + self._rqlim_time) - now
        self._log.trace('Last request at %f, delay: %f', self._last_rq, delay)
        if delay <= 0:
            # Nope, we're clear
            return

        self._log.debug('Waiting %f sec for rate limit', delay)
        yield sleep(delay)
        self._log.trace('Resuming operations')

    def _decode(self, response, default_encoding='UTF-8'):
        """
        Decode a given response body.
        """
        return decode_body(response.headers['Content-Type'], response.body,
                default_encoding)

    @coroutine
    def api_fetch(self, uri, **kwargs):
        """
        Make a raw request whilst respecting the HAD API request limits.

        This is primarily to support retrieval of avatars and other data
        without hitting the HAD.io site needlessly hard.
        """
        if 'connect_timeout' not in kwargs:
            kwargs['connect_timeout'] = 120.0
        if 'request_timeout' not in kwargs:
            kwargs['request_timeout'] = 120.0

        try:
            yield self._rq_sem.acquire()
            while True:
                try:
                    yield self._ratelimit_sleep()
                    response = yield self._client.fetch(uri, **kwargs)
                    self._last_rq = self._io_loop.time()
                    self._log.audit('Request:\n'
                        '%s %s\n'
                        'Headers: %s\n'
                        'Response: %s\n'
                        'Headers: %s\n'
                        'Body:\n%s',
                        response.request.method,
                        response.request.url,
                        response.request.headers,
                        response.code,
                        response.headers,
                        response_text(response))
                    break
                except gaierror as e:
                    # EAGAIN from the resolver is transient; retry the request.
                    # Anything else propagates to the caller.
                    if e.errno != EAGAIN:
                        raise
                    continue
                except HTTPError as e:
                    if e.response is not None:
                        self._log.audit('Request:\n'
                            '%s %s\n'
                            'Headers: %s\n'
                            'Response: %s\n'
                            'Headers: %s\n'
                            'Body:\n%s',
                            e.response.request.method,
                            e.response.request.url,
                            e.response.request.headers,
                            e.response.code,
                            e.response.headers,
                            response_text(e.response))
                    if e.code == 403:
                        # Back-end is rate limiting us.  Back off an hour.
                        self._forbidden_expiry = self._io_loop.time() \
                                + 3600.0
                    raise
                except ConnectionResetError:
                    # Back-end is blocking us.  Back off 15 minutes.
                    self._forbidden_expiry = self._io_loop.time() \
                            + 900.0
                    raise
        finally:
            self._rq_sem.release()

        raise Return(response)

    @coroutine
    def _api_call(self, uri, query=None, token=None, api_key=True, **kwargs):
        headers = kwargs.setdefault('headers', {})
        headers.setdefault('Accept', 'application/json')
        if token is not None:
            headers['Authorization'] = 'token %s' % token

        if query is None:
            query = {}

        if api_key:
            query.setdefault('api_key', self._api_key)

        self._log.audit('Query arguments: %r', query)
        encode_kv = lambda k, v : '%s=%s' % (k, urlparse.quote_plus(str(v)))
        def encode_item(item):
            (key, value) = item
            if isinstance(value, list):
                return '&'.join(map(lambda v : encode_kv(key, v), value))
            else:
                return encode_kv(key, value)

        if len(query) > 0:
            uri += '?%s' % '&'.join(map(encode_item, query.items()))

        if not uri.startswith('http'):
            uri = self._api_uri + uri

        self._log.audit('%s %r', kwargs.get('method','GET'), uri)
        response = yield self.api_fetch(uri, **kwargs)

        # If we get here, then our service is back.
        self._forbidden_expiry = None
        (ct, ctopts, body) = self._decode(response)
        if ct.lower() != 'application/json':
            raise ValueError('Server returned unrecognised type %s' % ct)
        raise Return(json.loads(body))

    # oAuth endpoints

    @property
    def auth_uri(self):
        """
        Return the auth URI that we need to send the user to if they're not
        logged in.
        """
        return self._auth_uri % dict(CLIENT_ID=self._client_id)

    def get_token(self, code):
        """
        Fetch the token for API queries from the authorization code given.
        """
        # Determine where to retrieve the access token from
        post_uri = self._token_uri % dict(
                CLIENT_ID=urlparse.quote_plus(self._client_id),
                CLIENT_SECRET=urlparse.quote_plus(self._client_secret),
                CODE=urlparse.quote_plus(code)
        )

        return self._api_call(
            post_uri, method='POST', body=b'', api_key=False)

    # Pagination options

    def _page_query_opts(self, page, per_page):
        query = {}
        if page is not None:
            query['page'] = int(page)
        if per_page is not None:
            query['per_page'] = int(per_page)

        return query

    # User API endpoints

    def get_current_user(self, token):
        """
        Fetch the current user's profile information.
        """
        return self._api_call('/me', token=token)

    def _user_query_opts(self, sortby, page, per_page):
        query = self._page_query_opts(page, per_page)
        sortby = UserSortBy(sortby)
        query['sortby'] = sortby.value
        return query

    _GET_USERS_WORKAROUND_RE = re.compile(
            r'<a href="([^"]+)" class="hacker-image">')
    _PRIVATE_MSG_LINK_RE = re.compile(
            r'<a href="/messages/new\?user=(\d+)">')
    @coroutine
    def get_user_ids(self, sortby=UserSortBy.influence, page=None):
        if page is None:
            page = 1

        sortby = UserSortBy(sortby)
        response = yield self.api_fetch(
                'https://hackaday.io/hackers?sort=%s&page=%d' \
                        % (sortby.value, page))
        (ct, ctopts, body) = self._decode(response)

        # Body is in HTML, look for links to profile pages
        pages = []
        for line in body.split('\n'):
            match = self._GET_USERS_WORKAROUND_RE.search(line)
            if match:
                pages.append(match.group(1))

        ids = []
        # Fetch each profile page (ugh!) and look for user ID
        # This is literally all we need at this point, the rest we'll
        # get from the API.
        for page in pages:
            if page.startswith('/'):
                page = 'https://hackaday.io' + page
            response = yield self.api_fetch(page)
            (ct, ctopts, body) = self._decode(response)
            for line in body.split('\n'):
                match = self._PRIVATE_MSG_LINK_RE.search(line)
                if match:
                    ids.append(int(match.group(1)))
                    break

        raise Return(ids)

    @coroutine
    def _get_users_workaround(self, sortby=UserSortBy.influence, page=None):
        ids = yield self.get_user_ids(sortby, page)
        users = yield self.get_users(ids=ids)
        raise Return(users)

    @coroutine
    def get_users(self, sortby=UserSortBy.influence,
            ids=None, page=None, per_page=None):
        """
        Retrieve a list of all users
        """
        query = self._user_query_opts(sortby, page, per_page)

        if ids is None:
            # sortby==newest is broken, has been for a while now.
            if sortby == UserSortBy.newest:
                result = yield self._get_users_workaround(
                        sortby, query.get('page'))
            else:
                result = yield self._api_call('/users', query=query)
        elif isinstance(ids, slice):
            query['ids'] = '%d,%d' % (ids.start, ids.stop)
            result = yield self._api_call('/users/range', query=query)
        else:
            ids = set(ids)
            if len(ids) > 50:
                raise ValueError('Too many IDs')
            query['ids'] = ','.join(['%d' % uid for uid in ids])
            result = yield self._api_call('/users/batch', query=query)
        raise Return(result)

    def search_users(self, screen_name=None, location=None, tag=None,
            sortby=UserSortBy.influence, page=None, per_page=None):
        query = self._user_query_opts(sortby, page, per_page)

        for (arg, val) in   (   ('screen_name', screen_name),
                                ('location', location),
                                ('tag', tag)    ):
            if val is not None:
                query[arg] = str(val)
        return self._api_call('/users/search', query=query)

    def get_user(self, user_id):
        return self._api_call('/users/%d' % user_id)

    def get_user_followers(self, user_id,
            sortby=UserSortBy.influence, page=None, per_page=None):
        query = self._user_query_opts(sortby, page, per_page)
        return self._api_call('/users/%d/followers' % user_id, query=query)

    def get_user_following(self, user_id,
            sortby=UserSortBy.influence, page=None, per_page=None):
        query = self._user_query_opts(sortby, page, per_page)
        return self._api_call('/users/%d/following' % user_id, query=query)

    def get_user_projects(self, user_id,
            sortby=ProjectSortBy.skulls, page=None, per_page=None):
        query = self._project_query_opts(sortby, page, per_page)
        return self._api_call('/users/%d/projects' % user_id, query=query)

    def get_user_skulls(self, user_id,
            sortby=UserSortBy.influence, page=None, per_page=None):
        query = self._user_query_opts(sortby, page, per_page)
        return self._api_call('/users/%d/skulls' % user_id, query=query)

    def get_user_links(self, user_id, page=None, per_page=None):
        query = self._page_query_opts(page, per_page)
        return self._api_call('/users/%d/links' % user_id, query=query)

    def get_user_tags(self, user_id, page=None, per_page=None):
        query = self._page_query_opts(page, per_page)
        return self._api_call('/users/%d/tags' % user_id, query=query)

    def get_user_pages(self, user_id, page=None, per_page=None):
        query = self._page_query_opts(page, per_page)
        return self._api_call('/users/%d/pages' % user_id, query=query)

    # Projects API

    def _project_query_opts(self, sortby, page, per_page):
        query = self._page_query_opts(page, per_page)
        sortby = ProjectSortBy(sortby)
        query['sortby'] = sortby.value
        return query

    def get_projects(self, sortby=ProjectSortBy.skulls,
            ids=None, page=None, per_page=None):
        """
        Retrieve a list of all projects
        """
        query = self._project_query_opts(sortby, page, per_page)

        if ids is None:
            return self._api_call('/projects', query=query)
        elif isinstance(ids, slice):
            query['ids'] = '%d,%d' % (ids.start, ids.stop)
            return self._api_call('/projects/range', query=query)
        else:
            ids = set(ids)
            if len(ids) > 50:
                raise ValueError('Too many IDs')
            query['ids'] = ','.join(['%d' % pid for pid in ids])
            return self._api_call('/projects/batch', query=query)

    def search_projects(self, term,
            sortby=ProjectSortBy.skulls, page=None, per_page=None):
        query = self._project_query_opts(sortby, page, per_page)
        query['search_term'] = str(term)
        return self._api_call('/projects/search', query=query)

    def get_project(self, project_id):
        return self._api_call('/projects/%d' % project_id)

    def get_project_team(self, project_id,
            sortby=UserSortBy.influence, page=None, per_page=None):
        query = self._user_query_opts(sortby, page, per_page)
        return self._api_call('/projects/%d/team' % project_id, query=query)

    def get_project_followers(self, project_id,
            sortby=UserSortBy.influence, page=None, per_page=None):
        query = self._user_query_opts(sortby, page, per_page)
        return self._api_call('/projects/%d/followers' % project_id,
                query=query)

    def get_project_skulls(self, project_id,
            sortby=UserSortBy.influence, page=None, per_page=None):
        query = self._user_query_opts(sortby, page, per_page)
        return self._api_call('/projects/%d/skulls' % project_id,
                query=query)

    def get_project_comments(self, project_id,
            sortby=UserSortBy.influence, page=None, per_page=None):
        query = self._user_query_opts(sortby, page, per_page)
        return self._api_call('/projects/%d/comments' % project_id,
                query=query)

    def get_project_links(self, project_id, page=None, per_page=None):
        query = self._page_query_opts(page, per_page)
        return self._api_call('/projects/%d/links' % project_id,
                query=query)

    def get_project_images(self, project_id, page=None, per_page=None):
        query = self._page_query_opts(page, per_page)
        return self._api_call('/projects/%d/images' % project_id,
                query=query)

    def get_project_components(self, project_id, page=None, per_page=None):
        query = self._page_query_opts(page, per_page)
        return self._api_call('/projects/%d/components' % project_id,
                query=query)

    def get_project_tags(self, project_id, page=None, per_page=None):
        query = self._page_query_opts(page, per_page)
        return self._api_call('/projects/%d/tags' % project_id, query=query)

    def get_project_logs(self, project_id, page=None, per_page=None):
        query = self._page_query_opts(page, per_page)
        return self._api_call('/projects/%d/logs' % project_id, query=query)

    def get_project_instructions(self, project_id, page=None, per_page=None):
        query = self._page_query_opts(page, per_page)
        return self._api_call('/projects/%d/instructions' % project_id,
                query=query)

    def get_project_details(self, project_id, page=None, per_page=None):
        query = self._page_query_opts(page, per_page)
        return self._api_call('/projects/%d/details' % project_id,
                query=query)
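
# A minimal usage sketch (the credentials are placeholders and show_profile is
# illustrative; `token` would come from the oAuth flow via auth_uri/get_token):
@coroutine
def show_profile(token):
    api = HackadayAPI(client_id='CLIENT_ID', client_secret='CLIENT_SECRET',
            api_key='API_KEY')
    profile = yield api.get_current_user(token)
    print(profile)
# e.g. IOLoop.current().run_sync(lambda: show_profile('ACCESS_TOKEN'))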
Ejemplo n.º 29
0
import threading, psycopg2 as pg2
import datetime
from tornado.locks import Semaphore
from tornado import gen
from collections import OrderedDict
trunks = OrderedDict()
trunks_groups = OrderedDict()
trunks_semaphore = Semaphore(1)


@gen.coroutine
def _read_trunks():
    global trunks
    global trunks_groups
    if trunks and trunks_groups:
        raise gen.Return(trunks)

    trunks = OrderedDict()
    trunks_groups = OrderedDict()
    try:
        db = pg2.connect(
            'host=192.168.222.20 port=5432 dbname=freeswitch_trunks user=freeswitch password=freeswitch'
        )
        c = db.cursor()
        SELECT = 'select tr.name as group_trunks, trl.phone as trunk_name, o.name as operator_name, o.icon_name, ' \
        'trl.in_trunk_position, c.id as channel_id, trl.direction, d.id as device_id, ' \
        'c.lines as max_lines, c.sip_gateway_name, d.address, c.port, dcl.name as device_class, tr.screen_position, ' \
        'trl.trunk_id ' \
        'from trunk_lines trl ' \
        'left join operators o on (o.id=trl.operator_id) ' \
        'left join trunks tr on (tr.id=trl.trunk_id) ' \
Ejemplo n.º 30
0
@coroutine
def cull_idle(
    url, api_token, profiles_list=[], db_filename="profile_quotas.db", check_every=600, concurrency=10
):
    """Shutdown idle single-user servers"""

    auth_header = {'Authorization': 'token %s' % api_token}
    req = HTTPRequest(url=url + '/users', headers=auth_header)
    now = datetime.now(timezone.utc)
    client = AsyncHTTPClient()

    if concurrency:
        semaphore = Semaphore(concurrency)

        @coroutine
        def fetch(req):
            """client.fetch wrapped in a semaphore to limit concurrency"""
            yield semaphore.acquire()
            try:
                return (yield client.fetch(req))
            finally:
                yield semaphore.release()

    else:
        fetch = client.fetch

    resp = yield fetch(req)
    users = json.loads(resp.body.decode('utf8', 'replace'))
    futures = []

    @coroutine
    def handle_server(user, server_name, server):
        """Handle (maybe) culling a single server

        "server" is the entire server model from the API.

        Returns True if server is now stopped (user removable),
        False otherwise.
        """
        log_name = user['name']
        if server_name:
            log_name = '%s/%s' % (user['name'], server_name)
        if server.get('pending'):
            app_log.warning(
                "Not culling server %s with pending %s", log_name, server['pending']
            )
            return False

        # jupyterhub < 0.9 defined 'server.url' once the server was ready
        # as an *implicit* signal that the server was ready.
        # 0.9 adds a dedicated, explicit 'ready' field.
        # By current (0.9) definitions, servers that have no pending
        # events and are not ready shouldn't be in the model,
        # but let's check just to be safe.

        if not server.get('ready', bool(server['url'])):
            app_log.warning(
                "Not culling not-ready not-pending server %s: %s", log_name, server
            )
            return False

        if server.get('started'):
            age = now - parse_date(server['started'])
        else:
            # started may be undefined on jupyterhub < 0.9
            age = None


        # CUSTOM CULLING TEST CODE HERE
        # Add in additional server tests here.  Return False to mean "don't
        # cull", True means "cull immediately", or, for example, update some
        # other variables like inactive_limit.
        #
        # Here, server['state'] is the result of the get_state method
        # on the spawner.  This does *not* contain the below by
        # default, you may have to modify your spawner to make this
        # work.  The `user` variable is the user model from the API.
        #
        # if server['state']['profile_name'] == 'unlimited'
        #     return False
        # inactive_limit = server['state']['culltime']

        should_cull = False

        # if there's no profile info in the server state to base the determination on, there's nothing to go on
        profile_slug = server.get("state", {}).get("profile_slug", None)
        balance = float("inf")

        if profile_slug:
            conn = db.get_connection(db_filename)
            db.update_user_tokens(conn, profiles_list, user['name'], user['admin'])
            
            for profile in profiles_list:
                if profile["slug"] == profile_slug and "quota" in profile:
                    hours = (check_every / 60 / 60)
                    db.log_usage(conn, profiles_list, user['name'], profile_slug, hours, user['admin'])
                    db.charge_tokens(conn, profiles_list, user['name'], profile_slug, hours, user['admin'])
                    balance = db.get_balance(conn, profiles_list, user['name'], profile_slug, user['admin'])

                    if balance < 0.0:
                        pass
                        # don't actually cull, let the balance go negative (since we don't have a way to alert the user that their server is about to be killed)
                        # should_cull = True
            db.close_connection(conn)

        if should_cull:
            app_log.info(
                "Culling server %s (balance for profile %s is %s)", log_name, profile_slug, balance
            )

        if not should_cull:
            app_log.debug(
                "Not culling server %s (balance for profile %s is %s)",
                log_name,
                profile_slug,
                balance,
            )
            return False

        if server_name:
            # culling a named server
            delete_url = url + "/users/%s/servers/%s" % (
                quote(user['name']),
                quote(server['name']),
            )
        else:
            delete_url = url + '/users/%s/server' % quote(user['name'])

        req = HTTPRequest(url=delete_url, method='DELETE', headers=auth_header)
        resp = yield fetch(req)
        if resp.code == 202:
            app_log.warning("Server %s is slow to stop", log_name)
            # return False to prevent culling user with pending shutdowns
            return False
        return True

    @coroutine
    def handle_user(user):
        """Handle one user.

        Create a list of their servers, and async exec them.  Wait for
        that to be done, and if all servers are stopped, possibly cull
        the user.
        """
        # shutdown servers first.
        # Hub doesn't allow deleting users with running servers.
        # jupyterhub 0.9 always provides a 'servers' model.
        # 0.8 only does this when named servers are enabled.
        if 'servers' in user:
            servers = user['servers']
        else:
            # jupyterhub < 0.9 without named servers enabled.
            # create servers dict with one entry for the default server
            # from the user model.
            # only if the server is running.
            servers = {}
            if user['server']:
                servers[''] = {
                    'last_activity': user['last_activity'],
                    'pending': user['pending'],
                    'url': user['server'],
                }
        server_futures = [
            handle_server(user, server_name, server)
            for server_name, server in servers.items()
        ]
        results = yield multi(server_futures)

    for user in users:
        futures.append((user['name'], handle_user(user)))

    for (name, f) in futures:
        try:
            result = yield f
        except Exception:
            app_log.exception("Error processing %s", name)
        else:
            if result:
                app_log.debug("Finished culling %s", name)
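
# A hedged sketch of how cull_idle is typically driven (hub URL and token are
# placeholders; PeriodicCallback re-runs the check every check_every seconds):
from functools import partial
from tornado.ioloop import IOLoop, PeriodicCallback

def run_culler():
    cull = partial(cull_idle, url='http://127.0.0.1:8081/hub/api',
                   api_token='API_TOKEN', check_every=600)
    loop = IOLoop.current()
    loop.add_callback(cull)                     # run once at startup
    PeriodicCallback(cull, 600 * 1000).start()  # then every 10 minutes
    loop.start()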
Ejemplo n.º 31
0
class BaseHandler(tornado.web.RequestHandler):
    pairs = {}  # type: Dict[str, str]
    analyzers = {}  # type: Dict[str, Tuple[str, str]]
    generators = {}  # type: Dict[str, Tuple[str, str]]
    taggers = {}  # type: Dict[str, Tuple[str, str]]
    spellers = {}  # type: Dict[str, Tuple[str, str]]
    # (l1, l2): [translation.Pipeline], only contains flushing pairs!
    pipelines = {}  # type: Dict[str, List]
    pipelines_holding = []  # type: List
    callback = None
    timeout = None
    scale_mt_logs = False
    verbosity = 0
    api_keys_conf = None

    # dict representing a graph of translation pairs; keys are source languages
    # e.g. pairs_graph['eng'] = ['fra', 'spa']
    pairs_graph = {}  # type: Dict[str, List[str]]
    # 2-D dict storing the shortest path for a chained translation pair
    # keys are source and target languages
    # e.g. paths['eng']['fra'] = ['eng', 'spa', 'fra']
    paths = {}  # type: Dict[str, Dict[str, List[str]]]

    stats = {
        'startdate': datetime.now(),
        'useCount': {},
        'vmsize': 0,
        'timing': [],
    }

    # (l1, l2): translation.ParsedModes
    pipeline_cmds = {}  # type: Dict
    max_pipes_per_pair = 1
    min_pipes_per_pair = 0
    max_users_per_pipe = 5
    max_idle_secs = 0
    restart_pipe_after = 1000
    doc_pipe_sem = Semaphore(3)
    # Empty the url_cache[pair] when it's this full:
    max_inmemory_url_cache = 1000  # type: int
    url_cache = {}  # type: Dict[Tuple[str, str], Dict[str, str]]
    url_cache_path = None  # type: Optional[str]
    # Keep half a gig free when storing url_cache to disk:
    min_free_space_disk_url_cache = 512 * 1024 * 1024  # type: int

    def initialize(self):
        self.callback = self.get_argument('callback', default=None)

    @classmethod
    def init_pairs_graph(cls):
        for pair in cls.pairs:
            lang1, lang2 = pair.split('-')
            if lang1 in cls.pairs_graph:
                cls.pairs_graph[lang1].append(lang2)
            else:
                cls.pairs_graph[lang1] = [lang2]

    @classmethod
    def calculate_paths(cls, start):
        nodes = set()
        for pair in map(lambda x: x.split('-'), cls.pairs):
            nodes.add(pair[0])
            nodes.add(pair[1])
        dists = {}
        prevs = {}
        dists[start] = 0

        while nodes:
            u = min(nodes, key=lambda u: dists.get(u, sys.maxsize))
            nodes.remove(u)
            for v in cls.pairs_graph.get(u, []):
                if v in nodes:
                    other = dists.get(
                        u, sys.maxsize
                    ) + 1  # TODO: weight(u, v) -- lower weight = better translation
                    if other < dists.get(v, sys.maxsize):
                        dists[v] = other
                        prevs[v] = u

        cls.paths[start] = {}
        for u in prevs:
            prev = prevs[u]
            path = [u]
            while prev:
                path.append(prev)
                prev = prevs.get(prev)
            cls.paths[start][u] = list(reversed(path))

    @classmethod
    def init_paths(cls):
        for lang in cls.pairs_graph:
            cls.calculate_paths(lang)

    def log_vmsize(self):
        if self.verbosity < 1:
            return
        scale = {'kB': 1024, 'mB': 1048576, 'KB': 1024, 'MB': 1048576}
        try:
            for line in open('/proc/%d/status' % os.getpid()):
                if line.startswith('VmSize:'):
                    _, num, unit = line.split()
                    break
            vmsize = int(num) * scale[unit]
            if vmsize > self.stats['vmsize']:
                logging.warning('VmSize of %s from %d to %d', os.getpid(),
                                self.stats['vmsize'], vmsize)
                self.stats['vmsize'] = vmsize
        except Exception as e:
            # Don't fail just because we couldn't log:
            logging.info('Exception in log_vmsize: %s', e)

    def send_response(self, data):
        self.log_vmsize()
        if isinstance(data, dict) or isinstance(data, list):
            data = dump_json(data)
            self.set_header('Content-Type', 'application/json; charset=UTF-8')

        if self.callback:
            self.set_header('Content-Type',
                            'application/javascript; charset=UTF-8')
            self._write_buffer.append(utf8('%s(%s)' % (self.callback, data)))
        else:
            self._write_buffer.append(utf8(data))
        self.finish()

    def write_error(self, status_code, **kwargs):
        http_explanations = {
            400:
            'Request not properly formatted or contains languages that Apertium APy does not support',
            404:
            'Resource requested does not exist. URL may have been mistyped',
            408:
            'Server did not receive a complete request within the time it was prepared to wait. Try again',
            500:
            'Unexpected condition on server. Request could not be fulfilled.',
        }
        explanation = kwargs.get('explanation',
                                 http_explanations.get(status_code, ''))
        if 'exc_info' in kwargs and len(kwargs['exc_info']) > 1:
            exception = kwargs['exc_info'][1]
            if hasattr(exception, 'log_message') and exception.log_message:
                explanation = exception.log_message % exception.args
            elif hasattr(exception, 'reason'):
                explanation = exception.reason or tornado.httputil.responses.get(
                    status_code, 'Unknown')
            else:
                explanation = tornado.httputil.responses.get(
                    status_code, 'Unknown')

        result = {
            'status': 'error',
            'code': status_code,
            'message': tornado.httputil.responses.get(status_code, 'Unknown'),
            'explanation': explanation,
        }

        data = dump_json(result)
        self.set_header('Content-Type', 'application/json; charset=UTF-8')

        if self.callback:
            self.set_header('Content-Type',
                            'application/javascript; charset=UTF-8')
            self._write_buffer.append(utf8('%s(%s)' % (self.callback, data)))
        else:
            self._write_buffer.append(utf8(data))
        self.finish()

    def set_default_headers(self):
        self.set_header('Access-Control-Allow-Origin', '*')
        self.set_header('Access-Control-Allow-Methods', 'GET,POST,OPTIONS')
        self.set_header(
            'Access-Control-Allow-Headers',
            'accept, cache-control, origin, x-requested-with, x-file-name, content-type'
        )

    @tornado.web.asynchronous
    def post(self):
        self.get()

    def options(self):
        self.set_status(204)
        self.finish()
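
# A small worked check of the shortest-path setup above (the pair set is
# hypothetical; the dict values are unused by the graph code, so empty strings
# suffice):
class DemoHandler(BaseHandler):
    pairs = {'eng-spa': '', 'spa-fra': ''}
    pairs_graph = {}  # give the demo subclass its own graph/paths containers
    paths = {}

DemoHandler.init_pairs_graph()
DemoHandler.init_paths()
assert DemoHandler.paths['eng']['fra'] == ['eng', 'spa', 'fra']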
Ejemplo n.º 32
0
    class TornadoTransmission():
        def __init__(self,
                     max_concurrent_batches=10,
                     block_on_send=False,
                     block_on_response=False,
                     max_batch_size=100,
                     send_frequency=timedelta(seconds=0.25),
                     user_agent_addition=''):
            if not has_tornado:
                raise ImportError(
                    'TornadoTransmission requires tornado, but it was not found.'
                )

            self.block_on_send = block_on_send
            self.block_on_response = block_on_response
            self.max_batch_size = max_batch_size
            self.send_frequency = send_frequency

            user_agent = "libhoney-py/" + VERSION
            if user_agent_addition:
                user_agent += " " + user_agent_addition

            self.http_client = AsyncHTTPClient(
                force_instance=True, defaults=dict(user_agent=user_agent))

            # libhoney adds events to the pending queue for us to send
            self.pending = Queue(maxsize=1000)
            # we hand back responses from the API on the responses queue
            self.responses = Queue(maxsize=2000)

            self.batch_data = {}
            self.sd = statsd.StatsClient(prefix="libhoney")
            self.batch_sem = Semaphore(max_concurrent_batches)

        def start(self):
            ioloop.IOLoop.current().spawn_callback(self._sender)

        def send(self, ev):
            '''send accepts an event and queues it to be sent'''
            self.sd.gauge("queue_length", self.pending.qsize())
            try:
                if self.block_on_send:
                    self.pending.put(ev)
                else:
                    self.pending.put_nowait(ev)
                self.sd.incr("messages_queued")
            except QueueFull:
                response = {
                    "status_code": 0,
                    "duration": 0,
                    "metadata": ev.metadata,
                    "body": "",
                    "error": "event dropped; queue overflow",
                }
                if self.block_on_response:
                    self.responses.put(response)
                else:
                    try:
                        self.responses.put_nowait(response)
                    except QueueFull:
                        # if the response queue is itself full when trying to add
                        # this "queue overflow" response, just drop it.
                        pass
                self.sd.incr("queue_overflow")

        # We're using the older decorator/yield model for compatibility with
        # Python versions before 3.5.
        # See: http://www.tornadoweb.org/en/stable/guide/coroutines.html#python-3-5-async-and-await
        @gen.coroutine
        def _sender(self):
            '''_sender is the control loop that pulls events off the `self.pending`
            queue and submits batches for actual sending. '''
            events = []
            last_flush = time.time()
            while True:
                try:
                    ev = yield self.pending.get(timeout=self.send_frequency)
                    if ev is None:
                        # signals shutdown
                        yield self._flush(events)
                        return
                    events.append(ev)
                    if (len(events) > self.max_batch_size
                            or time.time() - last_flush >
                            self.send_frequency.total_seconds()):
                        yield self._flush(events)
                        events = []
                except TimeoutError:
                    yield self._flush(events)
                    events = []
                    last_flush = time.time()

        @gen.coroutine
        def _flush(self, events):
            if not events:
                return
            for dest, group in group_events_by_destination(events).items():
                yield self._send_batch(dest, group)

        @gen.coroutine
        def _send_batch(self, destination, events):
            ''' Makes a single batch API request with the given list of events. The
            `destination` argument contains the write key, API host and dataset
            name used to build the request.'''
            start = time.time()
            status_code = 0

            try:
                # enforce max_concurrent_batches
                yield self.batch_sem.acquire()
                url = urljoin(urljoin(destination.api_host, "/1/batch/"),
                              destination.dataset)
                payload = []
                for ev in events:
                    event_time = ev.created_at.isoformat()
                    if ev.created_at.tzinfo is None:
                        event_time += "Z"
                    payload.append({
                        "time": event_time,
                        "samplerate": ev.sample_rate,
                        "data": ev.fields()
                    })
                req = HTTPRequest(
                    url,
                    method='POST',
                    headers={
                        "X-Honeycomb-Team": destination.writekey,
                        "Content-Type": "application/json",
                    },
                    body=json.dumps(payload, default=json_default_handler),
                )
                self.http_client.fetch(req, self._response_callback)
                # store the events that were sent so we can process responses later
                # it is important that we delete these eventually, or we'll run into memory issues
                self.batch_data[req] = {"start": start, "events": events}
            except Exception as e:
                # Catch all exceptions and hand them to the responses queue.
                self._enqueue_errors(status_code, e, start, events)
            finally:
                self.batch_sem.release()

        def _enqueue_errors(self, status_code, error, start, events):
            for ev in events:
                self.sd.incr("send_errors")
                self._enqueue_response(status_code, "", error, start,
                                       ev.metadata)

        def _response_callback(self, resp):
            # resp.request should be the same HTTPRequest object built by _send_batch
            # and mapped to values in batch_data
            events = self.batch_data[resp.request]["events"]
            start = self.batch_data[resp.request]["start"]
            try:
                status_code = resp.code
                resp.rethrow()

                statuses = [d["status"] for d in json.loads(resp.body)]
                for ev, status in zip(events, statuses):
                    self._enqueue_response(status, "", None, start,
                                           ev.metadata)
                    self.sd.incr("messages_sent")
            except Exception as e:
                self._enqueue_errors(status_code, e, start, events)
                self.sd.incr("send_errors")
            finally:
                # clean up the data for this batch
                del self.batch_data[resp.request]

        def _enqueue_response(self, status_code, body, error, start, metadata):
            resp = {
                "status_code": status_code,
                "body": body,
                "error": error,
                "duration": (time.time() - start) * 1000,
                "metadata": metadata
            }
            if self.block_on_response:
                self.responses.put(resp)
            else:
                try:
                    self.responses.put_nowait(resp)
                except QueueFull:
                    pass

        def close(self):
            '''call close to send all in-flight requests and shut down the
                senders nicely. Times out after max 20 seconds per sending thread
                plus 10 seconds for the response queue'''
            try:
                self.pending.put(None, 10)
            except QueueFull:
                pass
            # signal to the responses queue that nothing more is coming.
            try:
                self.responses.put(None, 10)
            except QueueFull:
                pass

        def get_response_queue(self):
            ''' return the responses queue onto which the response object for
            each event send is placed'''
            return self.responses
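
# A minimal caller-side sketch of the class above (illustrative only; `events`
# is assumed to be a list of libhoney-style events exposing created_at,
# sample_rate, fields() and metadata):
@gen.coroutine
def send_some_events(transmission, events):
    transmission.start()                   # spawns _sender on the current IOLoop
    for ev in events:
        transmission.send(ev)              # non-blocking enqueue unless block_on_send=True
    transmission.close()                   # the None sentinel triggers a final _flush
    raise gen.Return(transmission.get_response_queue())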
Ejemplo n.º 33
0
class ManagedKernelPool(KernelPool):
    '''
    Spawns a pool of kernels. Manages access to individual kernels using a
    borrower/lender pattern. Cleans them all up when shut down.
    '''
    def __init__(self, prespawn_count, kernel_manager):
        # Make sure there's at least one kernel as a delegate
        if not prespawn_count:
            prespawn_count = 1

        super(ManagedKernelPool, self).__init__(prespawn_count, kernel_manager)

        self.kernel_clients = {}
        self.on_recv_funcs = {}
        self.pool_index = 0
        self.kernel_pool = []

        kernel_ids = self.kernel_manager.list_kernel_ids()
        self.kernel_semaphore = Semaphore(len(kernel_ids))

        # Connect to any prespawned kernels
        for kernel_id in kernel_ids:
            self.kernel_clients[kernel_id] = kernel_manager.get_kernel(
                kernel_id).client()
            self.kernel_pool.append(kernel_id)
            iopub = self.kernel_manager.connect_iopub(kernel_id)
            iopub.on_recv(self.create_on_reply(kernel_id))

    @gen.coroutine
    def acquire(self):
        '''
        Returns a kernel client and id for use and removes the kernel from the resource pool.
        Kernels must be returned via the release method.
        :return: Returns a kernel client and a kernel id
        '''
        yield self.kernel_semaphore.acquire()
        kernel_id = self.kernel_pool[0]
        del self.kernel_pool[0]
        raise gen.Return((self.kernel_clients[kernel_id], kernel_id))

    def release(self, kernel_id):
        '''
        Returns a kernel back to the resource pool.
        :param kernel_id: Id of the kernel to return to the pool
        '''
        self.kernel_pool.append(kernel_id)
        self.kernel_semaphore.release()

    def _on_reply(self, kernel_id, msg_list):
        idents, msg_list = self.kernel_clients[
            kernel_id].session.feed_identities(msg_list)
        msg = self.kernel_clients[kernel_id].session.deserialize(msg_list)
        self.on_recv_funcs[kernel_id](msg)

    def create_on_reply(self, kernel_id):
        '''
        Returns a lambda that handles replies for a single kernel, binding kernel_id in its closure so each kernel gets its own callback.
        '''
        return lambda msg_list: self._on_reply(kernel_id, msg_list)

    def on_recv(self, kernel_id, func):
        '''
        Registers a callback for io_pub messages for a particular kernel.
        This is needed to avoid having multiple callbacks per kernel client.
        :param kernel_id: Id of the kernel
        :param func: Callback function to handle the message
        '''
        self.on_recv_funcs[kernel_id] = func

    def shutdown(self):
        '''
        Shuts down all kernels in the pool and in the kernel manager.
        '''
        for kid in self.kernel_clients:
            self.kernel_clients[kid].stop_channels()
            self.kernel_manager.shutdown_kernel(kid, now=True)

        # Any remaining kernels that were not created for our pool should be shut down
        super(ManagedKernelPool, self).shutdown()
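
A minimal usage sketch of the borrower/lender pattern above; the run_on_pooled_kernel helper and the client.execute() call (the jupyter_client KernelClient API) are assumptions, only acquire() and release() come from ManagedKernelPool itself.

from tornado import gen


@gen.coroutine
def run_on_pooled_kernel(pool, code):
    # Borrow a kernel; this waits (without blocking the IOLoop) until one is free.
    client, kernel_id = yield pool.acquire()
    try:
        # Use the borrowed client, e.g. send an execute_request.
        msg_id = client.execute(code)
        raise gen.Return(msg_id)
    finally:
        # Always lend the kernel back so other borrowers are unblocked.
        pool.release(kernel_id)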
Ejemplo n.º 34
0
import datetime

import misaka
import psutil
import requests
from feedgen.feed import FeedGenerator
from pytube import YouTube
from tornado import gen, httputil, ioloop, iostream, process, web
from tornado.locks import Semaphore

__version__ = '3.0'

key = None
video_links = {}
playlist_feed = {}
channel_feed = {}
conversion_queue = {}
converting_lock = Semaphore(2)


def get_youtube_url(video):
    if video in video_links and video_links[video][
            'expire'] > datetime.datetime.now():
        return video_links[video]['url']
    yt = YouTube('http://www.youtube.com/watch?v=' + video)
    vid = yt.streams.get_highest_resolution().url
    parts = {
        part.split('=')[0]: part.split('=')[1]
        for part in vid.split('?')[-1].split('&')
    }
    link = {
        'url': vid,
        'expire': datetime.datetime.fromtimestamp(int(parts['expire']))
Ejemplo n.º 35
0
class ManagedKernelPool(KernelPool):
    """Spawns a pool of kernels that are treated as identical delegates for
    future requests.

    Manages access to individual kernels using a borrower/lender pattern.
    Cleans them all up when shut down.

    Parameters
    ----------
    prespawn_count
        Number of kernels to spawn immediately
    kernel_manager
        Kernel manager instance

    Attributes
    ----------
    kernel_clients : dict
        Map of kernel IDs to client instances for communicating with them
    on_recv_funcs : dict
        Map of kernel IDs to iopub callback functions
    kernel_pool : list
        List of available delegate kernel IDs
    kernel_semaphore : tornado.locks.Semaphore
        Semaphore that controls access to the kernel pool
    """
    def __init__(self, prespawn_count, kernel_manager):
        # Make sure there's at least one kernel as a delegate
        if not prespawn_count:
            prespawn_count = 1

        super(ManagedKernelPool, self).__init__(prespawn_count, kernel_manager)

        self.kernel_clients = {}
        self.on_recv_funcs = {}
        self.kernel_pool = []

        kernel_ids = self.kernel_manager.list_kernel_ids()
        self.kernel_semaphore = Semaphore(len(kernel_ids))

        # Create clients and iopub handlers for prespawned kernels
        for kernel_id in kernel_ids:
            self.kernel_clients[kernel_id] = kernel_manager.get_kernel(kernel_id).client()
            self.kernel_pool.append(kernel_id)
            iopub = self.kernel_manager.connect_iopub(kernel_id)
            iopub.on_recv(self.create_on_reply(kernel_id))

    @gen.coroutine
    def acquire(self):
        """Gets a kernel client and removes it from the available pool of
        clients.

        Returns
        -------
        tuple
            Kernel client instance, kernel ID
        """
        yield self.kernel_semaphore.acquire()
        kernel_id = self.kernel_pool[0]
        del self.kernel_pool[0]
        raise gen.Return((self.kernel_clients[kernel_id], kernel_id))

    def release(self, kernel_id):
        """Puts a kernel back into the pool of kernels available to handle
        requests.

        Parameters
        ----------
        kernel_id : str
            Kernel to return to the pool
        """
        self.kernel_pool.append(kernel_id)
        self.kernel_semaphore.release()

    def _on_reply(self, kernel_id, msg_list):
        """Invokes the iopub callback registered for the `kernel_id` and passes
        it a deserialized list of kernel messages.

        Parameters
        ----------
        kernel_id : str
            Kernel that sent the reply
        msg_list : list
            List of 0mq messages
        """
        idents, msg_list = self.kernel_clients[kernel_id].session.feed_identities(msg_list)
        msg = self.kernel_clients[kernel_id].session.deserialize(msg_list)
        self.on_recv_funcs[kernel_id](msg)

    def create_on_reply(self, kernel_id):
        """Creates an anonymous function to handle reply messages from the
        kernel.

        Parameters
        ----------
        kernel_id
            Kernel to listen to

        Returns
        -------
        function
            Callback function taking a kernel ID and 0mq message list
        """
        return lambda msg_list: self._on_reply(kernel_id, msg_list)

    def on_recv(self, kernel_id, func):
        """Registers a callback function for iopub messages from a particular
        kernel.

        This is needed to avoid having multiple callbacks per kernel client.

        Parameters
        ----------
        kernel_id
            Kernel from which to receive iopub messages
        func
            Callback function to use for kernel iopub messages
        """
        self.on_recv_funcs[kernel_id] = func

    def shutdown(self):
        """Shuts down all kernels and their clients.
        """
        for kid in self.kernel_clients:
            self.kernel_clients[kid].stop_channels()
            self.kernel_manager.shutdown_kernel(kid, now=True)

        # Any remaining kernels that were not created for our pool should be shut down
        super(ManagedKernelPool, self).shutdown()
Ejemplo n.º 36
0
class TornadoSubscriptionManager(SubscriptionManager):
    def __init__(self, pubnub_instance):

        subscription_manager = self

        self._message_queue = Queue()
        self._consumer_event = Event()
        self._cancellation_event = Event()
        self._subscription_lock = Semaphore(1)
        # self._current_request_key_object = None
        self._heartbeat_periodic_callback = None
        self._reconnection_manager = TornadoReconnectionManager(pubnub_instance)

        super(TornadoSubscriptionManager, self).__init__(pubnub_instance)
        self._start_worker()

        class TornadoReconnectionCallback(ReconnectionCallback):
            def on_reconnect(self):
                subscription_manager.reconnect()

                pn_status = PNStatus()
                pn_status.category = PNStatusCategory.PNReconnectedCategory
                pn_status.error = False

                subscription_manager._subscription_status_announced = True
                subscription_manager._listener_manager.announce_status(pn_status)

        self._reconnection_listener = TornadoReconnectionCallback()
        self._reconnection_manager.set_reconnection_listener(self._reconnection_listener)

    def _set_consumer_event(self):
        self._consumer_event.set()

    def _message_queue_put(self, message):
        self._message_queue.put(message)

    def _start_worker(self):
        self._consumer = TornadoSubscribeMessageWorker(self._pubnub,
                                                       self._listener_manager,
                                                       self._message_queue,
                                                       self._consumer_event)
        run = stack_context.wrap(self._consumer.run)
        self._pubnub.ioloop.spawn_callback(run)

    def reconnect(self):
        self._should_stop = False
        self._pubnub.ioloop.spawn_callback(self._start_subscribe_loop)
        # self._register_heartbeat_timer()

    def disconnect(self):
        self._should_stop = True
        self._stop_heartbeat_timer()
        self._stop_subscribe_loop()

    @tornado.gen.coroutine
    def _start_subscribe_loop(self):
        self._stop_subscribe_loop()

        yield self._subscription_lock.acquire()

        self._cancellation_event.clear()

        combined_channels = self._subscription_state.prepare_channel_list(True)
        combined_groups = self._subscription_state.prepare_channel_group_list(True)

        if len(combined_channels) == 0 and len(combined_groups) == 0:
            return

        envelope_future = Subscribe(self._pubnub) \
            .channels(combined_channels).channel_groups(combined_groups) \
            .timetoken(self._timetoken).region(self._region) \
            .filter_expression(self._pubnub.config.filter_expression) \
            .cancellation_event(self._cancellation_event) \
            .future()

        canceller_future = self._cancellation_event.wait()

        wi = tornado.gen.WaitIterator(envelope_future, canceller_future)

        # iterates at most twice: once for the result and once for the cancellation
        while not wi.done():
            try:
                result = yield wi.next()
            except Exception as e:
                # TODO: verify the error will not be eaten
                logger.error(e)
                raise
            else:
                if wi.current_future == envelope_future:
                    e = result
                elif wi.current_future == canceller_future:
                    return
                else:
                    raise Exception("Unexpected future resolved: %s" % str(wi.current_future))

                if e.is_error():
                    # A 599 status alone isn't conclusive - tornado uses this
                    # status code for a wide range of errors, for example:
                    # HTTP Server Error (599): [Errno -2] Name or service not known
                    if e.status is not None and e.status.category == PNStatusCategory.PNTimeoutCategory:
                        self._pubnub.ioloop.spawn_callback(self._start_subscribe_loop)
                        return

                    logger.error("Exception in subscribe loop: %s" % str(e))

                    if e.status is not None and e.status.category == PNStatusCategory.PNAccessDeniedCategory:
                        e.status.operation = PNOperationType.PNUnsubscribeOperation

                    self._listener_manager.announce_status(e.status)

                    self._reconnection_manager.start_polling()
                    self.disconnect()
                    return
                else:
                    self._handle_endpoint_call(e.result, e.status)

                    self._pubnub.ioloop.spawn_callback(self._start_subscribe_loop)

            finally:
                self._cancellation_event.set()
                yield tornado.gen.moment
                self._subscription_lock.release()
                self._cancellation_event.clear()
                break

    def _stop_subscribe_loop(self):
        if self._cancellation_event is not None and not self._cancellation_event.is_set():
            self._cancellation_event.set()

    def _stop_heartbeat_timer(self):
        if self._heartbeat_periodic_callback is not None:
            self._heartbeat_periodic_callback.stop()

    def _register_heartbeat_timer(self):
        super(TornadoSubscriptionManager, self)._register_heartbeat_timer()
        self._heartbeat_periodic_callback = PeriodicCallback(
            stack_context.wrap(self._perform_heartbeat_loop),
            self._pubnub.config.heartbeat_interval * TornadoSubscriptionManager.HEARTBEAT_INTERVAL_MULTIPLIER,
            self._pubnub.ioloop)
        self._heartbeat_periodic_callback.start()

    @tornado.gen.coroutine
    def _perform_heartbeat_loop(self):
        if self._heartbeat_call is not None:
            # TODO: cancel call
            pass

        cancellation_event = Event()
        state_payload = self._subscription_state.state_payload()
        presence_channels = self._subscription_state.prepare_channel_list(False)
        presence_groups = self._subscription_state.prepare_channel_group_list(False)

        if len(presence_channels) == 0 and len(presence_groups) == 0:
            return

        try:
            envelope = yield self._pubnub.heartbeat() \
                .channels(presence_channels) \
                .channel_groups(presence_groups) \
                .state(state_payload) \
                .cancellation_event(cancellation_event) \
                .future()

            heartbeat_verbosity = self._pubnub.config.heartbeat_notification_options
            if envelope.status.is_error:
                if heartbeat_verbosity == PNHeartbeatNotificationOptions.ALL or \
                        heartbeat_verbosity == PNHeartbeatNotificationOptions.FAILURES:
                    self._listener_manager.announce_status(envelope.status)
            else:
                if heartbeat_verbosity == PNHeartbeatNotificationOptions.ALL:
                    self._listener_manager.announce_status(envelope.status)

        except PubNubTornadoException:
            pass
            # TODO: check correctness
            # if e.status is not None and e.status.category == PNStatusCategory.PNTimeoutCategory:
            #     self._start_subscribe_loop()
            # else:
            #     self._listener_manager.announce_status(e.status)
        except Exception as e:
            print(e)
        finally:
            cancellation_event.set()

    @tornado.gen.coroutine
    def _send_leave(self, unsubscribe_operation):
        envelope = yield Leave(self._pubnub) \
            .channels(unsubscribe_operation.channels) \
            .channel_groups(unsubscribe_operation.channel_groups).future()
        self._listener_manager.announce_status(envelope.status)
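
The subscribe loop above races the long-poll future against the cancellation event with tornado.gen.WaitIterator. A stripped-down sketch of just that race, independent of the PubNub classes (the coroutine name and arguments are assumptions):

from tornado import gen


@gen.coroutine
def race_request_against_cancel(request_future, cancellation_event):
    # Whichever future resolves first wins; a cancellation discards the request result.
    canceller_future = cancellation_event.wait()
    wi = gen.WaitIterator(request_future, canceller_future)
    while not wi.done():
        result = yield wi.next()
        if wi.current_future is canceller_future:
            raise gen.Return(None)   # cancelled before the request completed
        raise gen.Return(result)     # the request completed first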
Ejemplo n.º 37
0
class Crawler(object):
    def _init_defaults(self):
        self.start_link = None
        self.link_priority = 2
        self.img_priority = 8
        self.politeness = 2
        self.workers_limit = 10  # allow at most 10 concurrent workers
        self.link_regex = re.compile("^http://.*")
        self.img_regex = re.compile(".*")
        self.fname_digits = 4
        self.min_width = 200
        self.min_height = 200
        self.img_dir = "E:/tmp/"
        self.idle_wait_loops = 100
        self.port = 8888

    def _load_config(self):
        parser = ConfigParser.ConfigParser()
        parser.read("config.ini")

        if parser.has_option("global", "starturl"):
            starturl = parser.get("global", "starturl")
            self.start_link = starturl

        if parser.has_option("global", "linkregex"):
            self.link_regex = re.compile(parser.get("global", "linkregex"))
        if parser.has_option("global", "imgregex"):
            self.img_regex = re.compile(parser.get("global", "imgregex"))

        if parser.has_option("global", "politeness"):
            politeness = parser.getint("global", "politeness")
            if politeness <= 0:
                print "politeness must be a positive integer"
                raise SystemExit()
            self.politeness = politeness
        if parser.has_option("global", "imgdir"):
            imgdir = parser.get("global", "imgdir")
            if not os.path.exists(imgdir) or not os.path.isdir(imgdir):
                print "invalid imgdir configuration"
                raise SystemExit()
            if not imgdir.endswith("/"):
                imgdir += "/"
            self.img_dir = imgdir

        if parser.has_option("global", "minwidth"):
            width = parser.getint("global", "minwidth")
            self.min_width = width
        if parser.has_option("global", "minheight"):
            height = parser.getint("global", "minheight")
            self.min_height = height

    def __init__(self, start_link=None):
        self._init_defaults()
        # Now load the config file to override defaults
        self._load_config()

        if start_link:
            self.start_link = start_link
        if not self.start_link:
            raise SystemExit("No start link is provided, exiting now...")
        links.put(self.start_link)
        self.semaphore = Semaphore(self.workers_limit)

    @gen.coroutine
    def run(self):
        # First start a debug server
        app = Application([(r"/", WebHandler)])
        server = HTTPServer(app)
        server.listen(self.port)

        idle_loops = 0
        while True:
            if imageurls.qsize() == 0 and links.qsize() == 0:
                print "Both link and image queues are empty now"
                idle_loops += 1
                if idle_loops == self.idle_wait_loops:
                    break
            else:
                idle_loops = 0  # clear the idle loop counter
                if imageurls.qsize() == 0:
                    self.handle_links()
                elif links.qsize() == 0:
                    self.handle_imageurls()
                else:
                    choices = [0] * self.link_priority + [1] * self.img_priority
                    choice = random.choice(choices)
                    if choice:
                        self.handle_imageurls()
                    else:
                        self.handle_links()
            yield gen.sleep(0.1 * self.politeness)
        # Wait for all link handlers
        links.join()
        # Handle any imageurls generated by the last few links
        while imageurls.qsize():
            self.handle_imageurls()
        imageurls.join()

    @gen.coroutine
    def handle_links(self):
        yield self.semaphore.acquire()
        newlink = yield links.get()

        # Make sure we haven't visited this one
        if newlink in visited_links:
            self.semaphore.release()
            raise gen.Return()
        visited_links.add(newlink)

        # use async client to fetch this url
        client = AsyncHTTPClient()
        tries = 3  # Give it 3 tries before recording it as a failure
        while tries:
            response = yield client.fetch(newlink, raise_error=False)
            if response.code == 200:
                break
            tries -= 1

        # release the semaphore
        self.semaphore.release()
        if response.code != 200:
            link_failures.append(newlink)
            print "[FAILURE] - %s" % newlink
            raise gen.Return()

        # TODO: replace this with a report api
        print "[VISITED] - %s" % newlink

        # parse url to get the base url
        components = urlparse.urlparse(newlink)
        baseurl = components[0] + "://" + components[1]
        path = components[2]

        # parse the html with bs
        soup = bs4.BeautifulSoup(response.body)
        # extract valid links and put into links
        a_tags = soup.find_all("a")
        for tag in a_tags:
            if "href" not in tag.attrs:
                continue
            href = tag["href"]
            if href.startswith("#"):
                continue
            if href.startswith("/"):  # relative
                href = baseurl + href
            else:
                if not path.endswith("/"):
                    path = path[: path.rfind("/") + 1]
                href = baseurl + "/" + path + href
            if not self.link_regex.match(href):
                continue
            if href in visited_links:
                continue
            links.put(href)
            print "NEWLINK:", href

        # extract imgs and put into imageurls
        img_tags = soup.find_all("img")
        for tag in img_tags:
            if "src" not in tag.attrs:
                continue
            src = tag["src"]
            if src.startswith("/"):  # relative
                src = baseurl + src
            if not self.img_regex.match(src):
                continue
            if src in downloaded_images:
                continue
            imageurls.put(src)
            print "NEW IMAGE:", src

        # now the task is done
        links.task_done()

    @gen.coroutine
    def handle_imageurls(self):
        yield self.semaphore.acquire()
        imgurl = yield imageurls.get()

        if imgurl in downloaded_images:
            self.semaphore.release()
            raise gen.Return()
        # mark the image as downloaded
        downloaded_images.add(imgurl)

        # use async client to fetch this url
        client = AsyncHTTPClient()
        tries = 3  # Give it 3 tries before recording it as a failure
        while tries:
            response = yield client.fetch(imgurl, raise_error=False)
            if response.code == 200:
                break
            tries -= 1
        # Download is finished, release semaphore
        self.semaphore.release()

        if response.code != 200:
            download_failures.append(imgurl)
            print "[FAILURE] - %s" % imgurl
            raise gen.Return()

        # TODO: replace this with a report api
        print "[DOWNLOADED] - %s" % imgurl

        # Read the file content
        img = PIL.Image.open(response.buffer)
        w, h = img.size
        if w < self.min_width or h < self.min_height:
            raise gen.Return()

        # find out the image extension, default to jpg
        if "." in imgurl:
            ext = imgurl.split(".")[-1].lower()
            if ext not in ["jpg", "png", "gif"]:
                ext = "jpg"
        elif img.format:
            ext = img.format.lower()
        else:
            ext = "jpg"

        # increment the counter
        global img_counter
        img_counter += 1
        fname = str(img_counter).zfill(self.fname_digits) + "." + ext
        fpath = self.img_dir + fname
        # save the image file and make sure it is closed
        with open(fpath, "wb") as f:
            f.write(response.body)

        # now the task is done
        imageurls.task_done()
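
The crawler above caps concurrency by making every handler acquire a shared Semaphore before doing network I/O. A self-contained sketch of that pattern (URLs, helper names, and the limit of 10 are assumptions):

from tornado import gen, ioloop
from tornado.httpclient import AsyncHTTPClient
from tornado.locks import Semaphore

sem = Semaphore(10)  # allow at most 10 fetches in flight


@gen.coroutine
def fetch_one(url):
    yield sem.acquire()
    try:
        client = AsyncHTTPClient()
        response = yield client.fetch(url, raise_error=False)
        raise gen.Return((url, response.code))
    finally:
        sem.release()


@gen.coroutine
def main():
    urls = ["http://example.com/a", "http://example.com/b"]
    results = yield [fetch_one(u) for u in urls]
    print(results)


if __name__ == "__main__":
    ioloop.IOLoop.current().run_sync(main)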
Ejemplo n.º 38
0
def _read_trunks():
    global trunks
    global trunks_groups
    if trunks and trunks_groups:
        raise gen.Return(trunks)

    trunks = OrderedDict()
    trunks_groups = OrderedDict()
    try:
        db = pg2.connect(
            'host=192.168.222.20 port=5432 dbname=freeswitch_trunks user=freeswitch password=freeswitch'
        )
        c = db.cursor()
        SELECT = 'select tr.name as group_trunks, trl.phone as trunk_name, o.name as operator_name, o.icon_name, ' \
        'trl.in_trunk_position, c.id as channel_id, trl.direction, d.id as device_id, ' \
        'c.lines as max_lines, c.sip_gateway_name, d.address, c.port, dcl.name as device_class, tr.screen_position, ' \
        'trl.trunk_id ' \
        'from trunk_lines trl ' \
        'left join operators o on (o.id=trl.operator_id) ' \
        'left join trunks tr on (tr.id=trl.trunk_id) ' \
        'left join channels c on (c.id=trl.channel_id) ' \
        'left join devices d on (d.id=c.device_id) ' \
        'left join device_classes dcl on (dcl.id=d.class_id) ' \
        'where (trl.channel_id > 0) and (c.is_active) ' \
        'order by tr.screen_position, group_trunks, trl.in_trunk_position '
        c.execute(SELECT)
        for r in c.fetchall():
            trunk_name = r[9] if r[9] else ('0' + r[1] if r[1][:2] != '23' else r[1])
            # trunk_name = r[9] if r[9] else '0'+r[1]

            if trunk_name not in trunks:
                trunks[trunk_name] = OrderedDict()
            if r[0] not in trunks_groups:
                trunks_groups[r[0]] = []
            trunks_groups[r[0]].append(trunk_name)
            trunks[trunk_name].update(
                callers=dict(),  # Call(s) currently being handled
                max_lines=r[8],  # channels.lines
                operator_logo='static/img/%s.png' % r[3],  # operators.icon_name
                channel_id=r[5],  # channels.id
                counters={
                    'answered': 0,
                    'total': 0,
                    'rejected': 0
                },  # Counters
                direction=r[6],  # trunk_lines.directions ['inbound', 'outbound', 'sms']
                channel='%s:%s' % (r[10], r[11]),  # device.address, channel.port
                group=r[0],  # trunks.name
                semaphore=Semaphore(1),  # Per-trunk lock
                phone=r[1],  # Phone number on this line
                device_id=r[7],
                screen_position=r[13],
                trunk_id=r[14],
            )
            print(trunk_name)
        c.close()
        db.close()
    except Exception as e:
        print('ChannelHandler._read_trunks exception: %s' % e)
    # print('ChannelHandler._read_trunks data: %s' % trunks)
    raise gen.Return(trunks)
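
A minimal sketch of how the per-trunk Semaphore(1) built above can serialize work on a single trunk; the with_trunk_lock helper and the place_call callable are assumptions, only the trunks dict layout comes from _read_trunks().

from tornado import gen


@gen.coroutine
def with_trunk_lock(trunk_name, place_call):
    trunk = trunks[trunk_name]
    # Semaphore(1) acts as a per-trunk mutex: only one coroutine at a time
    # may use this trunk's channel.
    yield trunk['semaphore'].acquire()
    try:
        result = yield place_call(trunk['channel'], trunk['phone'])
        raise gen.Return(result)
    finally:
        trunk['semaphore'].release()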
Ejemplo n.º 39
0
def cull_idle(url,
              api_token,
              inactive_limit,
              cull_users=False,
              max_age=0,
              concurrency=10):
    """Shutdown idle single-user servers

    If cull_users, inactive *users* will be deleted as well.
    """
    auth_header = {
        'Authorization': 'token %s' % api_token,
    }
    req = HTTPRequest(
        url=url + '/users',
        headers=auth_header,
    )
    now = datetime.now(timezone.utc)
    client = AsyncHTTPClient()

    if concurrency:
        semaphore = Semaphore(concurrency)

        @coroutine
        def fetch(req):
            """client.fetch wrapped in a semaphore to limit concurrency"""
            yield semaphore.acquire()
            try:
                return (yield client.fetch(req))
            finally:
                yield semaphore.release()
    else:
        fetch = client.fetch

    resp = yield fetch(req)
    users = json.loads(resp.body.decode('utf8', 'replace'))
    futures = []

    @coroutine
    def handle_server(user, server_name, server):
        """Handle (maybe) culling a single server

        Returns True if server was culled,
        False otherwise.
        """
        log_name = user['name']
        if server_name:
            log_name = '%s/%s' % (user['name'], server_name)
        if server.get('pending'):
            app_log.warning("Not culling server %s with pending %s", log_name,
                            server['pending'])
            return False

        if server.get('started'):
            age = now - parse_date(server['started'])
        else:
            # started may be undefined on jupyterhub < 0.9
            age = 'unknown'

        # check last activity
        # last_activity can be None in 0.9
        if server['last_activity']:
            inactive = now - parse_date(server['last_activity'])
        else:
            # no activity yet, use start date
            # last_activity may be None with jupyterhub 0.9,
            # which introduces the 'started' field which is never None
            # for running servers
            inactive = age

        should_cull = inactive.total_seconds() >= inactive_limit
        if should_cull:
            app_log.info("Culling server %s (inactive for %s)", log_name,
                         format_td(inactive))

        if max_age and not should_cull:
            # only check started if max_age is specified
            # so that we can still be compatible with jupyterhub 0.8
            # which doesn't define the 'started' field
            if age.total_seconds() >= max_age:
                app_log.info("Culling server %s (age: %s, inactive for %s)",
                             log_name, format_td(age), format_td(inactive))
                should_cull = True

        if not should_cull:
            app_log.debug("Not culling server %s (age: %s, inactive for %s)",
                          log_name, format_td(age), format_td(inactive))
            return False

        req = HTTPRequest(
            url=url + '/users/%s/server' % quote(user['name']),
            method='DELETE',
            headers=auth_header,
        )
        resp = yield fetch(req)
        if resp.code == 202:
            app_log.warning(
                "Server %s is slow to stop",
                log_name,
            )
            # return False to prevent culling user with pending shutdowns
            return False
        return True

    @coroutine
    def handle_user(user):
        """Handle one user"""
        # shutdown servers first.
        # Hub doesn't allow deleting users with running servers.
        servers = user.get(
            'servers', {
                '': {
                    'started': user.get('started'),
                    'last_activity': user['last_activity'],
                    'pending': user['pending'],
                }
            })
        server_futures = [
            handle_server(user, server_name, server)
            for server_name, server in servers.items()
        ]
        results = yield multi(server_futures)
        if not cull_users:
            return
        # some servers are still running, cannot cull users
        still_alive = len(results) - sum(results)
        if still_alive:
            app_log.debug("Not culling user %s with %i servers still alive",
                          user['name'], still_alive)
            return False

        should_cull = False
        if user.get('created'):
            age = now - parse_date(user['created'])
        else:
            # created may be undefined on jupyterhub < 0.9
            age = 'unknown'

        # check last activity
        # last_activity can be None in 0.9
        if user['last_activity']:
            inactive = now - parse_date(user['last_activity'])
        else:
            # no activity yet, use start date
            # last_activity may be None with jupyterhub 0.9,
            # which introduces the 'created' field which is never None
            inactive = age

        should_cull = inactive.total_seconds() >= inactive_limit
        if should_cull:
            app_log.info("Culling user %s (inactive for %s)", user['name'],
                         inactive)

        if max_age and not should_cull:
            # only check created if max_age is specified
            # so that we can still be compatible with jupyterhub 0.8
            # which doesn't define the 'started' field
            if age.total_seconds() >= max_age:
                app_log.info("Culling user %s (age: %s, inactive for %s)",
                             user['name'], format_td(age), format_td(inactive))
                should_cull = True

        if not should_cull:
            app_log.debug("Not culling user %s (created: %s, last active: %s)",
                          user['name'], format_td(age), format_td(inactive))
            return False

        req = HTTPRequest(
            url=url + '/users/%s' % user['name'],
            method='DELETE',
            headers=auth_header,
        )
        yield fetch(req)
        return True

    for user in users:
        futures.append((user['name'], handle_user(user)))

    for (name, f) in futures:
        try:
            result = yield f
        except Exception:
            app_log.exception("Error processing %s", name)
        else:
            if result:
                app_log.debug("Finished culling %s", name)
Ejemplo n.º 40
0
class IPCMessageSubscriber(IPCClient):
    '''
    Salt IPC message subscriber

    Create an IPC client to receive messages from IPC publisher

    An example of a very simple IPCMessageSubscriber connecting to an IPCMessagePublisher.
    This example assumes an already running IPCMessagePublisher.

    IMPORTANT: The below example also assumes the IOLoop is NOT running.

    # Import Tornado libs
    import tornado.ioloop

    # Import Salt libs
    import salt.config
    import salt.transport.ipc

    # Create a new IO Loop.
    # We know that this new IO Loop is not currently running.
    io_loop = tornado.ioloop.IOLoop()

    ipc_publisher_socket_path = '/var/run/ipc_publisher.ipc'

    ipc_subscriber = salt.transport.ipc.IPCMessageSubscriber(ipc_publisher_socket_path, io_loop=io_loop)

    # Connect to the server
    # Use the associated IO Loop that isn't running.
    io_loop.run_sync(ipc_subscriber.connect)

    # Wait for some data
    package = ipc_subscriber.read_sync()
    '''
    def __singleton_init__(self, socket_path, io_loop=None):
        super(IPCMessageSubscriber, self).__singleton_init__(socket_path,
                                                             io_loop=io_loop)
        self._read_sync_future = None
        self._read_stream_future = None
        self._sync_ioloop_running = False
        self.saved_data = []
        self._sync_read_in_progress = Semaphore()
        self.callbacks = set()
        self.reading = False

    @tornado.gen.coroutine
    def _read_sync(self, timeout):
        yield self._sync_read_in_progress.acquire()
        exc_to_raise = None
        ret = None

        try:
            while True:
                if self._read_stream_future is None:
                    self._read_stream_future = self.stream.read_bytes(
                        4096, partial=True)

                if timeout is None:
                    wire_bytes = yield self._read_stream_future
                else:
                    future_with_timeout = FutureWithTimeout(
                        self.io_loop, self._read_stream_future, timeout)
                    wire_bytes = yield future_with_timeout

                self._read_stream_future = None

                # Remove the timeout once we get some data or an exception
                # occurs. We will assume that the rest of the data is already
                # there or is coming soon if an exception doesn't occur.
                timeout = None

                self.unpacker.feed(wire_bytes)
                first = True
                for framed_msg in self.unpacker:
                    if first:
                        ret = framed_msg['body']
                        first = False
                    else:
                        self.saved_data.append(framed_msg['body'])
                if not first:
                    # We read at least one piece of data
                    break
        except TornadoTimeoutError:
            # In the timeout case, just return None.
            # Keep 'self._read_stream_future' alive.
            ret = None
        except tornado.iostream.StreamClosedError as exc:
            log.trace('Subscriber disconnected from IPC %s', self.socket_path)
            self._read_stream_future = None
            exc_to_raise = exc
        except Exception as exc:
            log.error(
                'Exception occurred in Subscriber while handling stream: %s',
                exc)
            self._read_stream_future = None
            exc_to_raise = exc

        if self._sync_ioloop_running:
            # Stop the IO Loop so that self.io_loop.start() will return in
            # read_sync().
            self.io_loop.spawn_callback(self.io_loop.stop)

        # Release the semaphore before raising so a failed read does not
        # leave _sync_read_in_progress held forever.
        self._sync_read_in_progress.release()
        if exc_to_raise is not None:
            raise exc_to_raise  # pylint: disable=E0702
        raise tornado.gen.Return(ret)

    def read_sync(self, timeout=None):
        '''
        Read a message from an IPC socket

        The socket must already be connected.
        The associated IO Loop must NOT be running.
        :param int timeout: Timeout when receiving message
        :return: message data if successful. None if timed out. Will raise an
                 exception for all other error conditions.
        '''
        if self.saved_data:
            return self.saved_data.pop(0)

        self._sync_ioloop_running = True
        self._read_sync_future = self._read_sync(timeout)
        self.io_loop.start()
        self._sync_ioloop_running = False

        ret_future = self._read_sync_future
        self._read_sync_future = None
        return ret_future.result()

    @tornado.gen.coroutine
    def _read_async(self, callback):
        while not self.stream.closed():
            try:
                self._read_stream_future = self.stream.read_bytes(4096,
                                                                  partial=True)
                self.reading = True
                wire_bytes = yield self._read_stream_future
                self._read_stream_future = None
                self.unpacker.feed(wire_bytes)
                for framed_msg in self.unpacker:
                    body = framed_msg['body']
                    self.io_loop.spawn_callback(callback, body)
            except tornado.iostream.StreamClosedError:
                log.trace('Subscriber disconnected from IPC %s',
                          self.socket_path)
                break
            except Exception as exc:
                log.error(
                    'Exception occurred while Subscriber handling stream: %s',
                    exc)
                yield tornado.gen.sleep(1)

    def __run_callbacks(self, raw):
        for callback in self.callbacks:
            self.io_loop.spawn_callback(callback, raw)

    @tornado.gen.coroutine
    def read_async(self):
        '''
        Asynchronously read messages and invoke a callback when they are ready.

        :param callback: A callback with the received data
        '''
        while not self.connected():
            try:
                yield self.connect(timeout=5)
            except tornado.iostream.StreamClosedError:
                log.trace('Subscriber closed stream on IPC %s before connect',
                          self.socket_path)
                yield tornado.gen.sleep(1)
            except Exception as exc:
                log.error('Exception occurred while Subscriber connecting: %s',
                          exc)
                yield tornado.gen.sleep(1)
        yield self._read_async(self.__run_callbacks)

    def close(self):
        '''
        Routines to handle any cleanup before the instance shuts down.
        Sockets and filehandles should be closed explicitly, to prevent
        leaks.
        '''
        if not self._closing:
            IPCClient.close(self)
            if self._closing:
                # This will prevent this message from showing up:
                # '[ERROR   ] Future exception was never retrieved:
                # StreamClosedError'
                if self._read_sync_future is not None and self._read_sync_future.done():
                    self._read_sync_future.exception()
                if self._read_stream_future is not None and self._read_stream_future.done():
                    self._read_stream_future.exception()
Ejemplo n.º 41
0
def cull_idle(url,
              api_token,
              inactive_limit,
              cull_users=False,
              remove_named_servers=False,
              max_age=0,
              concurrency=10):
    """Shutdown idle single-user servers

    If cull_users, inactive *users* will be deleted as well.
    """
    auth_header = {'Authorization': 'token %s' % api_token}
    req = HTTPRequest(url=url + '/users', headers=auth_header)
    now = datetime.now(timezone.utc)
    client = AsyncHTTPClient()

    if concurrency:
        semaphore = Semaphore(concurrency)

        @coroutine
        def fetch(req):
            """client.fetch wrapped in a semaphore to limit concurrency"""
            yield semaphore.acquire()
            try:
                return (yield client.fetch(req))
            finally:
                yield semaphore.release()

    else:
        fetch = client.fetch

    resp = yield fetch(req)
    users = json.loads(resp.body.decode('utf8', 'replace'))
    futures = []

    @coroutine
    def handle_server(user, server_name, server, max_age, inactive_limit):
        """Handle (maybe) culling a single server

        "server" is the entire server model from the API.

        Returns True if server is now stopped (user removable),
        False otherwise.
        """
        log_name = user['name']
        if server_name:
            log_name = '%s/%s' % (user['name'], server_name)
        if server.get('pending'):
            app_log.warning("Not culling server %s with pending %s", log_name,
                            server['pending'])
            return False

        # jupyterhub < 0.9 defined 'server.url' once the server was ready
        # as an *implicit* signal that the server was ready.
        # 0.9 adds a dedicated, explicit 'ready' field.
        # By current (0.9) definitions, servers that have no pending
        # events and are not ready shouldn't be in the model,
        # but let's check just to be safe.

        if not server.get('ready', bool(server['url'])):
            app_log.warning("Not culling not-ready not-pending server %s: %s",
                            log_name, server)
            return False

        if server.get('started'):
            age = now - parse_date(server['started'])
        else:
            # started may be undefined on jupyterhub < 0.9
            age = None

        # check last activity
        # last_activity can be None in 0.9
        if server['last_activity']:
            inactive = now - parse_date(server['last_activity'])
        else:
            # no activity yet, use start date
            # last_activity may be None with jupyterhub 0.9,
            # which introduces the 'started' field which is never None
            # for running servers
            inactive = age

        # CUSTOM CULLING TEST CODE HERE
        # Add in additional server tests here.  Return False to mean "don't
        # cull", True means "cull immediately", or, for example, update some
        # other variables like inactive_limit.
        #
        # Here, server['state'] is the result of the get_state method
        # on the spawner.  This does *not* contain the below by
        # default, you may have to modify your spawner to make this
        # work.  The `user` variable is the user model from the API.
        #
        # if server['state']['profile_name'] == 'unlimited'
        #     return False
        # inactive_limit = server['state']['culltime']
        state = server['state']
        # Support getting state from wrapspawner child's conf.
        if 'child_conf' in state:
            state = state['child_conf']
        if 'cull_max_age' in state:
            max_age = max(max_age, state['cull_max_age'])
        if 'cull_inactive_limit' in state:
            inactive_limit = max(inactive_limit, state['cull_inactive_limit'])
        app_log.info(
            f"CULL IDLE: {user['name']}/{server_name}: {max_age} inactive={inactive} inactive_limit={inactive_limit} age={age} last_activity={server['last_activity']}"
        )

        should_cull = (inactive is not None
                       and inactive.total_seconds() >= inactive_limit)
        if should_cull:
            app_log.info("Culling server %s (inactive for %s)", log_name,
                         format_td(inactive))

        if max_age and not should_cull:
            # only check started if max_age is specified
            # so that we can still be compatible with jupyterhub 0.8
            # which doesn't define the 'started' field
            if age is not None and age.total_seconds() >= max_age:
                app_log.info(
                    "Culling server %s (age: %s, inactive for %s)",
                    log_name,
                    format_td(age),
                    format_td(inactive),
                )
                should_cull = True

        if not should_cull:
            app_log.debug(
                "Not culling server %s (age: %s, inactive for %s)",
                log_name,
                format_td(age),
                format_td(inactive),
            )
            return False

        body = None
        if server_name:
            # culling a named server
            # A named server can be stopped and kept available to the user
            # for starting again or stopped and removed. To remove the named
            # server we have to pass an additional option in the body of our
            # DELETE request.
            delete_url = url + "/users/%s/servers/%s" % (
                quote(user['name']),
                quote(server['name']),
            )
            if remove_named_servers:
                body = json.dumps({"remove": True})
        else:
            delete_url = url + '/users/%s/server' % quote(user['name'])

        req = HTTPRequest(url=delete_url,
                          method='DELETE',
                          headers=auth_header,
                          body=body,
                          allow_nonstandard_methods=True)
        resp = yield fetch(req)
        if resp.code == 202:
            app_log.warning("Server %s is slow to stop", log_name)
            # return False to prevent culling user with pending shutdowns
            return False
        return True

    @coroutine
    def handle_user(user):
        """Handle one user.

        Create a list of their servers, and async exec them.  Wait for
        that to be done, and if all servers are stopped, possibly cull
        the user.
        """
        # shutdown servers first.
        # Hub doesn't allow deleting users with running servers.
        # jupyterhub 0.9 always provides a 'servers' model.
        # 0.8 only does this when named servers are enabled.
        if 'servers' in user:
            servers = user['servers']
        else:
            # jupyterhub < 0.9 without named servers enabled.
            # create servers dict with one entry for the default server
            # from the user model.
            # only if the server is running.
            servers = {}
            if user['server']:
                servers[''] = {
                    'last_activity': user['last_activity'],
                    'pending': user['pending'],
                    'url': user['server'],
                }
        server_futures = [
            handle_server(user, server_name, server, max_age, inactive_limit)
            for server_name, server in servers.items()
        ]
        results = yield multi(server_futures)
        if not cull_users:
            return
        # some servers are still running, cannot cull users
        still_alive = len(results) - sum(results)
        if still_alive:
            app_log.debug(
                "Not culling user %s with %i servers still alive",
                user['name'],
                still_alive,
            )
            return False

        should_cull = False
        if user.get('created'):
            age = now - parse_date(user['created'])
        else:
            # created may be undefined on jupyterhub < 0.9
            age = None

        # check last activity
        # last_activity can be None in 0.9
        if user['last_activity']:
            inactive = now - parse_date(user['last_activity'])
        else:
            # no activity yet, use start date
            # last_activity may be None with jupyterhub 0.9,
            # which introduces the 'created' field which is never None
            inactive = age

        should_cull = (inactive is not None
                       and inactive.total_seconds() >= inactive_limit)
        if should_cull:
            app_log.info("Culling user %s (inactive for %s)", user['name'],
                         inactive)

        if max_age and not should_cull:
            # only check created if max_age is specified
            # so that we can still be compatible with jupyterhub 0.8
            # which doesn't define the 'started' field
            if age is not None and age.total_seconds() >= max_age:
                app_log.info(
                    "Culling user %s (age: %s, inactive for %s)",
                    user['name'],
                    format_td(age),
                    format_td(inactive),
                )
                should_cull = True

        if not should_cull:
            app_log.debug(
                "Not culling user %s (created: %s, last active: %s)",
                user['name'],
                format_td(age),
                format_td(inactive),
            )
            return False

        req = HTTPRequest(url=url + '/users/%s' % user['name'],
                          method='DELETE',
                          headers=auth_header)
        yield fetch(req)
        return True

    for user in users:
        futures.append((user['name'], handle_user(user)))

    for (name, f) in futures:
        try:
            result = yield f
        except Exception:
            app_log.exception("Error processing %s", name)
        else:
            if result:
                app_log.debug("Finished culling %s", name)
Ejemplo n.º 42
0
    def __init__(self, method='ascii', **kwargs):
        super(AsyncModbusGeneratorClient, self).__init__(method=method,
                                                         **kwargs)
        self.sem = Semaphore(1)
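
A minimal sketch of what the Semaphore(1) above is typically for: serializing requests over a single serial connection so only one Modbus transaction is in flight at a time. The locked_request wrapper and the request_fn callable are assumptions, not part of the client class.

from tornado import gen


@gen.coroutine
def locked_request(client, request_fn, *args, **kwargs):
    # client.sem is the Semaphore(1) created in __init__ above
    yield client.sem.acquire()
    try:
        result = yield request_fn(*args, **kwargs)
        raise gen.Return(result)
    finally:
        client.sem.release()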
Ejemplo n.º 43
0
async def cull_idle(api_url,
                    base_url,
                    api_token,
                    inactive_limit,
                    max_age=0,
                    warn_timeout=0,
                    concurrency=10,
                    verify_ssl=True):
    """Shutdown idle single-user servers
    If cull_users, inactive *users* will be deleted as well.
    """
    auth_header = {
        'Authorization': 'token %s' % api_token,
    }
    req = HTTPRequest(
        url=api_url + '/users',
        headers=auth_header,
        validate_cert=verify_ssl,
    )
    now = datetime.now(timezone.utc)
    client = AsyncHTTPClient()

    if concurrency:
        semaphore = Semaphore(concurrency)

        async def fetch(req):
            """client.fetch wrapped in a semaphore to limit concurrency"""
            await semaphore.acquire()
            try:
                return (await client.fetch(req))
            finally:
                semaphore.release()
    else:
        fetch = client.fetch

    # tornado.curl_httpclient.CurlError: HTTP 599: Connection timed out after 20003 milliseconds
    # Potential timeout error here? (slow to stop line: 478)
    resp = await fetch(req)
    users = json.loads(resp.body.decode('utf8', 'replace'))
    futures = []

    async def get_server_active(server):
        server_url = urljoin(base_url, server['url'])
        app_log.debug('Server url: %s', server_url)

        if server.get('started'):
            age = now - parse_date(server['started'])
        else:
            # started may be undefined on jupyterhub < 0.9
            age = None

        # check server status
        num_kernels = 0
        try:
            # status query does not change last_activity on notebook server
            req = HTTPRequest(
                url=urljoin(server_url, 'api/status'),
                headers=auth_header,
                validate_cert=verify_ssl,
            )
            resp = await fetch(req)
            status = json.loads(resp.body.decode('utf-8', 'replace'))
            # app_log.info(status)
            inactive = [now - parse_date(status['last_activity'])]
            num_kernels = status['kernels']
        except HTTPClientError as e:
            app_log.error('Failed to get notebook status: %s', e)
            # make sure inactive is defined
            inactive = [age]

        # if an error happened, then num_kernels is still 0
        # TODO: for now kernel activity tracking is deactivated
        # code below is problematic ... it triggers an update of last activity on
        # the notebook server ... we should also look into other activities like open shells (processes?)
        # a busy cell that finishes updates last_activity as well
        # Also it seems that a user has to keep the notebook in an open, visible/foreground tab:
        #   putting the tab aside does not help,
        #   minimizing the browser window or moving it off screen does not help either,
        #   hiding the browser window behind another window stops refreshing as well,
        #   jupyterlab stops polling if document.hidden is true (the old interface doesn't poll at all)
        #   -> we could also hook in here and add a 'keep-alive' extension that keeps polling (at a slower interval or so?)
        # TODO: to make this more reliable, we should install a notebook api/service extension
        #       that tracks all the activity we want. This allows us to use the internal
        #       notebook API and container/host process inspection to look at more things as well
        if not num_kernels:
            # no kernel running
            return True, min(inactive), age

        # FIXME: hardcoded skip rest of activity checking
        return True, min(inactive), age

        # assume everything is idle
        idle = True
        # kernels:
        # TODO: we are calling through the proxy here... which will update
        #       the hub's view of inactivity :(
        if app_log.isEnabledFor(logging.DEBUG):
            app_log.debug('Query kernels %s', urljoin(server_url,
                                                      'api/kernels'))
        req = HTTPRequest(
            url=urljoin(server_url, 'api/kernels'),
            headers=auth_header,
            validate_cert=verify_ssl,
        )
        try:
            resp = await fetch(req)
            kernels = json.loads(resp.body.decode('utf-8', 'replace'))
            for kernel in kernels:
                # TODO: seems like kernel state stays in 'starting' after a restart and auto
                #       re-creation of running kernels from last ui state
                idle = idle and (kernel['execution_state']
                                 in ('idle', 'starting'))
                inactive.append(now - parse_date(kernel['last_activity']))
        except HTTPClientError as e:
            app_log.error('Failed to inspect notebook kernels: %s', e)
        # find smallest inactive time
        return idle, min(inactive), age

    async def handle_server(user, server_name, server):
        """Handle (maybe) culling a single server
        Returns True if server is now stopped (user removable),
        False otherwise.
        """
        # import ipdb; ipdb.set_trace()
        log_name = user['name']
        if server_name:
            log_name = '%s/%s' % (user['name'], server_name)
        if server.get('pending'):
            app_log.warning("Not culling server %s with pending %s", log_name,
                            server['pending'])
            return False

        # jupyterhub < 0.9 defined 'server.url' once the server was ready
        # as an *implicit* signal that the server was ready.
        # 0.9 adds a dedicated, explicit 'ready' field.
        # By current (0.9) definitions, servers that have no pending
        # events and are not ready shouldn't be in the model,
        # but let's check just to be safe.

        if not server.get('ready', bool(server['url'])):
            app_log.warning("Not culling not-ready not-pending server %s: %s",
                            log_name, server)
            return False

        idle, inactive, age = await get_server_active(server)
        if not idle and app_log.isEnabledFor(logging.DEBUG):
            # something is not idle
            # when the kernel transitions from busy to idle, the kernel resets the
            # inactive timer as well.
            app_log.debug(
                'Not culling server %s with busy connections. (inactive for %s)',
                log_name, inactive)
            return

        should_cull = (inactive is not None
                       and inactive.total_seconds() >= inactive_limit)
        if should_cull:
            app_log.info("Culling server %s (inactive for %s)", log_name,
                         format_td(inactive))

        if max_age and not should_cull:
            # only check started if max_age is specified
            # so that we can still be compatible with jupyterhub 0.8
            # which doesn't define the 'started' field
            if age is not None and age.total_seconds() >= max_age:
                app_log.info("Culling server %s (age: %s, inactive for %s)",
                             log_name, format_td(age), format_td(inactive))
                should_cull = True

        # should we warn?
        remaining = inactive_limit - inactive.total_seconds()
        if should_warn(user, warn_timeout, inactive) and remaining > 0:
            IOLoop.current().run_in_executor(
                None, send_email, user, {
                    'serverurl': urljoin(base_url, server['url']),
                    'inactive': human_seconds(inactive.total_seconds()),
                    'remaining': human_seconds(remaining),
                })

        if not should_cull:
            app_log.debug("Not culling server %s (age: %s, inactive for %s)",
                          log_name, format_td(age), format_td(inactive))
            return False

        if server_name:
            # culling a named server
            delete_url = api_url + "/users/%s/servers/%s" % (quote(
                user['name']), quote(server['name']))
        else:
            delete_url = api_url + '/users/%s/server' % quote(user['name'])

        req = HTTPRequest(
            url=delete_url,
            method='DELETE',
            headers=auth_header,
            validate_cert=verify_ssl,
        )
        resp = await fetch(req)
        if resp.code == 202:
            app_log.warning(
                "Server %s is slow to stop",
                log_name,
            )
            # return False to prevent culling user with pending shutdowns
            return False
        return True

    async def handle_user(user):
        """Handle one user.
        Create a list of their servers, and async exec them.  Wait for
        that to be done, and if all servers are stopped, possibly cull
        the user.
        """
        # shutdown servers first.

        servers = user['servers']
        server_futures = [
            handle_server(user, server_name, server)
            for server_name, server in servers.items()
        ]
        results = await multi(server_futures)
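        # NOTE: the excerpt ends here. A hedged sketch of the user-culling step
        # described in the docstring could look like the commented code below;
        # the `cull_users` flag is an assumption, not part of this excerpt.
        # if cull_users and all(results):
        #     req = HTTPRequest(
        #         url=api_url + '/users/%s' % quote(user['name']),
        #         method='DELETE',
        #         headers=auth_header,
        #         validate_cert=verify_ssl,
        #     )
        #     await fetch(req)
        #     return True
        # return False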

    for user in users:
        futures.append((user['name'], handle_user(user)))

    for (name, f) in futures:
        try:
            result = await f
        except Exception:
            app_log.exception("Error processing %s", name)
        else:
            if result:
                app_log.debug("Finished culling %s", name)
Example #44
0
    return int(round(time.time() * 1000))


request_users = {}


class User(object):
    def __init__(self):
        self.last_visit_time = 0
        self.req_num = 0


index = 0
worker_stop = 0
concurrent_worker_count = 20
consumer = Semaphore(concurrent_worker_count)
request_queue = Queue(maxsize=5000)


async def worker(worker_id):
    print("worker {} start".format(worker_id))
    while not worker_stop:
        await consumer.acquire()
        req_handler = await request_queue.get()
        print("worker {} work".format(worker_id))
        try:
            global index
            remote_ip = req_handler.request.remote_ip
            message = "love you liuchen {} {}".format(remote_ip, index)
            index = index + 1
            #process grpc here
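            # (the original example is truncated at this point)
        finally:
            # Hedged completion, not part of the original: return the
            # concurrency slot and mark the queue item done so other
            # workers can proceed even if processing raises.
            consumer.release()
            request_queue.task_done()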
Example #45
0
File: ipc.py Project: bryson/salt
class IPCMessageSubscriber(IPCClient):
    '''
    Salt IPC message subscriber

    Create an IPC client to receive messages from IPC publisher

    An example of a very simple IPCMessageSubscriber connecting to an IPCMessagePublisher.
    This example assumes an already running IPCMessagePublisher.

    IMPORTANT: The below example also assumes the IOLoop is NOT running.

    # Import Tornado libs
    import tornado.ioloop

    # Import Salt libs
    import salt.config
    import salt.transport.ipc

    # Create a new IO Loop.
    # We know that this new IO Loop is not currently running.
    io_loop = tornado.ioloop.IOLoop()

    ipc_publisher_socket_path = '/var/run/ipc_publisher.ipc'

    ipc_subscriber = salt.transport.ipc.IPCMessageSubscriber(ipc_publisher_socket_path, io_loop=io_loop)

    # Connect to the server
    # Use the associated IO Loop that isn't running.
    io_loop.run_sync(ipc_subscriber.connect)

    # Wait for some data
    package = ipc_subscriber.read_sync()
    '''
    def __singleton_init__(self, socket_path, io_loop=None):
        super(IPCMessageSubscriber, self).__singleton_init__(
            socket_path, io_loop=io_loop)
        self._read_sync_future = None
        self._read_stream_future = None
        self._sync_ioloop_running = False
        self.saved_data = []
        self._sync_read_in_progress = Semaphore()

    @tornado.gen.coroutine
    def _read_sync(self, timeout):
        yield self._sync_read_in_progress.acquire()
        exc_to_raise = None
        ret = None

        try:
            while True:
                if self._read_stream_future is None:
                    self._read_stream_future = self.stream.read_bytes(4096, partial=True)

                if timeout is None:
                    wire_bytes = yield self._read_stream_future
                else:
                    future_with_timeout = FutureWithTimeout(
                        self.io_loop, self._read_stream_future, timeout)
                    wire_bytes = yield future_with_timeout

                self._read_stream_future = None

                # Remove the timeout once we get some data or an exception
                # occurs. We will assume that the rest of the data is already
                # there or is coming soon if an exception doesn't occur.
                timeout = None

                self.unpacker.feed(wire_bytes)
                first = True
                for framed_msg in self.unpacker:
                    if first:
                        ret = framed_msg['body']
                        first = False
                    else:
                        self.saved_data.append(framed_msg['body'])
                if not first:
                    # We read at least one piece of data
                    break
        except tornado.ioloop.TimeoutError:
            # In the timeout case, just return None.
            # Keep 'self._read_stream_future' alive.
            ret = None
        except tornado.iostream.StreamClosedError as exc:
            log.trace('Subscriber disconnected from IPC {0}'.format(self.socket_path))
            self._read_stream_future = None
            exc_to_raise = exc
        except Exception as exc:
            log.error('Exception occurred in Subscriber while handling stream: {0}'.format(exc))
            self._read_stream_future = None
            exc_to_raise = exc

        if self._sync_ioloop_running:
            # Stop the IO Loop so that self.io_loop.start() will return in
            # read_sync().
            self.io_loop.spawn_callback(self.io_loop.stop)

        # Release the semaphore before (possibly) raising, otherwise a failed
        # read would leave it held and block every later synchronous read.
        self._sync_read_in_progress.release()
        if exc_to_raise is not None:
            raise exc_to_raise  # pylint: disable=E0702
        raise tornado.gen.Return(ret)

    def read_sync(self, timeout=None):
        '''
        Read a message from an IPC socket

        The socket must already be connected.
        The associated IO Loop must NOT be running.
        :param int timeout: Timeout when receiving message
        :return: message data if successful. None if timed out. Will raise an
                 exception for all other error conditions.
        '''
        if self.saved_data:
            return self.saved_data.pop(0)

        self._sync_ioloop_running = True
        self._read_sync_future = self._read_sync(timeout)
        self.io_loop.start()
        self._sync_ioloop_running = False

        ret_future = self._read_sync_future
        self._read_sync_future = None
        return ret_future.result()

    @tornado.gen.coroutine
    def _read_async(self, callback):
        while not self.stream.closed():
            try:
                self._read_stream_future = self.stream.read_bytes(4096, partial=True)
                wire_bytes = yield self._read_stream_future
                self._read_stream_future = None
                self.unpacker.feed(wire_bytes)
                for framed_msg in self.unpacker:
                    body = framed_msg['body']
                    self.io_loop.spawn_callback(callback, body)
            except tornado.iostream.StreamClosedError:
                log.trace('Subscriber disconnected from IPC {0}'.format(self.socket_path))
                break
            except Exception as exc:
                log.error('Exception occurred while Subscriber handling stream: {0}'.format(exc))

    @tornado.gen.coroutine
    def read_async(self, callback):
        '''
        Asynchronously read messages and invoke a callback when they are ready.

        :param callback: A callback with the received data
        '''
        while not self.connected():
            try:
                yield self.connect(timeout=5)
            except tornado.iostream.StreamClosedError:
                log.trace('Subscriber closed stream on IPC {0} before connect'.format(self.socket_path))
                yield tornado.gen.sleep(1)
            except Exception as exc:
                log.error('Exception occurred while Subscriber connecting: {0}'.format(exc))
                yield tornado.gen.sleep(1)
        yield self._read_async(callback)

    def close(self):
        '''
        Routines to handle any cleanup before the instance shuts down.
        Sockets and filehandles should be closed explicitly, to prevent
        leaks.
        '''
        if not self._closing:
            IPCClient.close(self)
            # This will prevent this message from showing up:
            # '[ERROR   ] Future exception was never retrieved:
            # StreamClosedError'
            if self._read_sync_future is not None:
                self._read_sync_future.exc_info()
            if self._read_stream_future is not None:
                self._read_stream_future.exc_info()

    def __del__(self):
        # Use a string lookup: `in` on a dict tests its keys (strings), so
        # checking the class object itself is always False and close() would
        # never run here. The guard only needs to skip cleanup once module
        # globals have been torn down at interpreter exit.
        if 'IPCMessageSubscriber' in globals():
            self.close()
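

# Hedged usage sketch for read_async (not part of the original example): unlike
# the read_sync flow shown in the class docstring, read_async expects a
# *running* IO loop and hands each received message body to the supplied
# callback. The callback name and socket path below are illustrative only.
#
#     import tornado.ioloop
#     import salt.transport.ipc
#
#     io_loop = tornado.ioloop.IOLoop.current()
#     subscriber = salt.transport.ipc.IPCMessageSubscriber(
#         '/var/run/ipc_publisher.ipc', io_loop=io_loop)
#
#     def handle_msg(body):
#         print(body)
#
#     # read_async connects on demand, so just schedule it and start the loop
#     io_loop.spawn_callback(subscriber.read_async, handle_msg)
#     io_loop.start()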