Example #1
def __pyinotify_event(notifier):
    # Lock and batch multiple inotify events into a single deferred update.
    if __AccountsSingleton().lock[0]:
        return
    __AccountsSingleton().lock[0] = True
    # IOLoop.instance() is a class method; the original IOLoop().instance()
    # built a throwaway loop before fetching the singleton.
    io_loop = IOLoop.instance()
    io_loop.add_timeout(UPDATE_DELAY, __batch_update)
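
A minimal self-contained sketch of the same debounce pattern, assuming a 500 ms
batching delay (the definitions of UPDATE_DELAY and _batch_update here stand in
for the originals):

import datetime
from tornado.ioloop import IOLoop

UPDATE_DELAY = datetime.timedelta(milliseconds=500)
_pending = [False]  # one-slot lock, as in the example above

def _batch_update():
    _pending[0] = False  # release the lock so the next burst can schedule again
    print('processing batched events')

def on_event():
    if _pending[0]:
        return  # an update is already pending; coalesce this event into it
    _pending[0] = True
    IOLoop.current().add_timeout(UPDATE_DELAY, _batch_update)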
Example #2
def clean_timeout_users():
    print("clean timeout users")
    current_time = current_time_mills()
    # Iterate over a copy of the keys: deleting entries while iterating
    # the dict itself raises RuntimeError on Python 3.
    for key in list(request_users):
        if current_time - request_users[key].last_visit_time > 10 * 60 * 1000:
            print("del %s" % key)
            del request_users[key]

    # Re-schedule in one minute. add_timeout deadlines are in seconds
    # (IOLoop.time()), not milliseconds.
    IOLoop.current().add_timeout(deadline=time.time() + 60,
                                 callback=clean_timeout_users)
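
The same periodic cleanup can be written with tornado.ioloop.PeriodicCallback,
which reschedules itself and sidesteps the deadline-unit pitfall entirely; a
sketch assuming the request_users dict above:

from tornado.ioloop import PeriodicCallback

# Runs clean_timeout_users every minute; the interval is in milliseconds.
# With this in place the manual add_timeout re-scheduling is unnecessary.
cleaner = PeriodicCallback(clean_timeout_users, 60 * 1000)
cleaner.start()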
Example #3
def _add_timeout(self, deadline, callback):
    # Schedule the callback and record the handle so it can be cancelled later.
    timeout = self.io_loop.add_timeout(deadline, callback)
    self.timeouts.append(
        Timeout(callback=callback,
                deadline=deadline,
                reference=timeout))
    return timeout
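
The handle returned by add_timeout is what makes later cancellation possible; a
minimal sketch of the matching cleanup, assuming the same Timeout records (the
_clear_timeouts name is illustrative):

def _clear_timeouts(self):
    # Cancel every scheduled callback and drop the stored handles.
    for t in self.timeouts:
        self.io_loop.remove_timeout(t.reference)
    del self.timeouts[:]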
Example #4
from tornado.ioloop import IOLoop as TornadoIOLoop


class IOLoop(object):
    NONE = TornadoIOLoop.NONE
    READ = TornadoIOLoop.READ
    WRITE = TornadoIOLoop.WRITE
    ERROR = TornadoIOLoop.ERROR

    def __init__(self):
        self._tornado_io_loop = TornadoIOLoop()

    def inner(self):
        return self._tornado_io_loop

    def close(self, all_fds=False):
        self._tornado_io_loop.close(all_fds)

    def add_handler(self, fd, handler, events):
        self._tornado_io_loop.add_handler(fd, handler, events)

    def update_handler(self, fd, events):
        self._tornado_io_loop.update_handler(fd, events)

    def remove_handler(self, fd):
        self._tornado_io_loop.remove_handler(fd)

    def start(self):
        self._tornado_io_loop.start()

    def stop(self):
        self._tornado_io_loop.stop()

    def time(self):
        return self._tornado_io_loop.time()

    def add_timeout(self, deadline, callback):
        return self._tornado_io_loop.add_timeout(deadline, callback)

    def remove_timeout(self, timeout):
        self._tornado_io_loop.remove_timeout(timeout)

    def add_callback(self, callback, *args, **kwargs):
        self._tornado_io_loop.add_callback(callback, *args, **kwargs)

    def run(self):
        try:
            self.start()
        except KeyboardInterrupt:
            print("")
            print("Ctrl-C received. Exiting.")
Example #5
def clean_timeout_users():
    current_time = current_time_mills()
    # Iterate over a copy of the keys; deleting while iterating raises RuntimeError.
    for key in list(request_users):
        if current_time - request_users[key].last_visit_time > 10 * 60 * 1000:
            print("del %s" % key)
            del request_users[key]

    # Re-schedule in one minute; add_timeout deadlines are in seconds.
    IOLoop.current().add_timeout(deadline=time.time() + 60,
                                 callback=clean_timeout_users)


async def worker_runner():
    # Join all workers.
    await gen.multi([worker(i) for i in range(concurrent_worker_count)])


def make_app():
    return tornado.web.Application([
        (r"/", MainHandler),
    ])


if __name__ == "__main__":
    app = make_app()
    app.listen(8889)
    IOLoop.current().add_timeout(deadline=time.time() + 60,
                                 callback=clean_timeout_users)
    # run_sync drives the loop until worker_runner completes; the explicit
    # start() below only runs if the workers ever exit.
    IOLoop.current().run_sync(worker_runner)
    IOLoop.current().start()
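
The snippet assumes worker, concurrent_worker_count, and MainHandler are defined
elsewhere in the module; a minimal sketch of what the worker pool might look
like, built on tornado.queues (all names here are illustrative):

from tornado.queues import Queue

concurrent_worker_count = 4
task_queue = Queue()

async def worker(worker_id):
    # Pull tasks until the loop stops; task_done() lets join() track progress.
    async for task in task_queue:
        try:
            print("worker %d handling %r" % (worker_id, task))
        finally:
            task_queue.task_done()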
Example #6
File: ioloop.py  Project: HUSTGOC/electrumq
class IOLoop(threading.Thread):
    _futures = []
    loop_interval = 100  # ms
    loop_quit_wait = MAX_WAIT_SECONDS_BEFORE_SHUTDOWN  # second

    def __init__(self):
        threading.Thread.__init__(self)
        self.ioloop = TornadoIOLoop()

    def run(self):
        logger.debug('ioloop starting')

        def add_futures():
            # Drain the pending-futures list and hand each one to the loop.
            if self._futures:
                need_add = self._futures[:]
                self._futures = []
                for future, callback in need_add:
                    self.ioloop.add_future(future, callback)

        PeriodicCallback(add_futures, self.loop_interval, self.ioloop).start()
        self.ioloop.start()

    def add_future(self, future, callback=None):
        def nothing(future, **kwargs):
            pass

        if callback is None:
            callback = nothing
        # else:
        #     feature.add_done_callback(callback)
        self._futures.append((future, callback))

    def add_periodic(self, callback, interval=1000):
        if self.ioloop._timeouts is None:
            self.ioloop._timeouts = []
        PeriodicCallback(callback, interval, self.ioloop).start()

    def add_timeout(self, deadline, callback, *args, **kwargs):
        self.ioloop.add_timeout(deadline, callback, *args, **kwargs)

    def time(self):
        return self.ioloop.time()

    def quit(self):
        logger.info('begin to quit')
        self.ioloop.add_callback(self._quit)

    def _quit(self):
        """Drain outstanding callbacks and timeouts, then shut the loop down."""
        logger.info('Will shutdown in %s seconds ...',
                    MAX_WAIT_SECONDS_BEFORE_SHUTDOWN)
        io_loop = self.ioloop

        deadline = time.time() + MAX_WAIT_SECONDS_BEFORE_SHUTDOWN

        def stop_loop():
            """Poll until pending callbacks and timeouts drain or the deadline passes."""
            now = time.time()
            step = 0.01
            if now < deadline and (io_loop._callbacks
                                   or len(io_loop._timeouts) > 1):
                io_loop.add_timeout(max(now + step, deadline), stop_loop)
            else:
                io_loop.stop()
                io_loop.close()
                io_loop.clear_current()
                io_loop.clear_instance()
                logger.info('Shutdown')

        stop_loop()
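
Since this IOLoop subclasses threading.Thread, callers start it once and feed it
futures from other threads; a brief usage sketch:

loop = IOLoop()
loop.start()  # threading.Thread.start() invokes run(), which spins up the loop

# From any other thread, hand the loop a future plus a completion callback:
# loop.add_future(some_future, callback=on_done)

# At shutdown, ask the loop to drain and stop itself:
loop.quit()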
Example #7
class TornadoOctopus(object):
    def __init__(
            self, concurrency=10, auto_start=False, cache=False,
            expiration_in_seconds=30, request_timeout_in_seconds=10,
            connect_timeout_in_seconds=5, ignore_pycurl=False,
            limiter=None, allow_connection_reuse=True):

        self.concurrency = concurrency
        self.auto_start = auto_start
        self.last_timeout = None

        self.cache = cache
        self.response_cache = Cache(expiration_in_seconds=expiration_in_seconds)
        self.request_timeout_in_seconds = request_timeout_in_seconds
        self.connect_timeout_in_seconds = connect_timeout_in_seconds

        self.ignore_pycurl = ignore_pycurl

        self.running_urls = 0
        self.url_queue = []

        if PYCURL_AVAILABLE and not self.ignore_pycurl:
            logging.debug('pycurl is available, thus Octopus will be using it instead of tornado\'s simple http client.')
            AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
            self.allow_connection_reuse = allow_connection_reuse
        else:
            self.allow_connection_reuse = True

        if auto_start:
            logging.debug('Auto starting...')
            self.start()

        self.limiter = limiter

    @property
    def queue_size(self):
        return len(self.url_queue)

    @property
    def is_empty(self):
        return self.queue_size == 0

    def start(self):
        logging.debug('Creating IOLoop and http_client.')
        self.ioloop = IOLoop()
        self.http_client = AsyncHTTPClient(io_loop=self.ioloop)

    @classmethod
    def from_tornado_response(cls, url, response):
        cookies = response.request.headers.get('Cookie', '')
        if cookies:
            # Strip padding around each pair and split on the first '=' only,
            # so cookie values containing '=' survive.
            cookies = dict(cookie.strip().split('=', 1) for cookie in cookies.split(';'))

        return Response(
            url=url, status_code=response.code,
            headers=dict([(key, value) for key, value in response.headers.items()]),
            cookies=cookies,
            text=response.body, effective_url=response.effective_url,
            error=response.error and str(response.error) or None,
            request_time=response.request_time
        )

    def enqueue(self, url, handler, method='GET', **kw):
        logging.debug('Enqueueing %s...' % url)

        if self.cache:
            response = self.response_cache.get(url)

            if response is not None:
                logging.debug('Cache hit on %s.' % url)
                handler(url, response)
                return

        if self.running_urls < self.concurrency:
            logging.debug('Queue has space available for fetching %s.' % url)
            self.get_next_url(url, handler, method, **kw)
        else:
            logging.debug('Queue is full. Enqueueing %s for future fetch.' % url)
            self.url_queue.append((url, handler, method, kw))

    def fetch(self, url, handler, method, **kw):
        self.running_urls += 1

        if self.cache:
            response = self.response_cache.get(url)

            if response is not None:
                logging.debug('Cache hit on %s.' % url)
                self.running_urls -= 1
                handler(url, response)
                return

        logging.info('Fetching %s...' % url)

        request = HTTPRequest(
            url=url,
            method=method,
            connect_timeout=self.connect_timeout_in_seconds,
            request_timeout=self.request_timeout_in_seconds,
            prepare_curl_callback=self.handle_curl_callback,
            **kw
        )

        self.http_client.fetch(request, self.handle_request(url, handler))

    def handle_curl_callback(self, curl):
        if not self.allow_connection_reuse:
            curl.setopt(pycurl.FRESH_CONNECT, 1)

    def get_next_url(self, request_url=None, handler=None, method=None, **kw):
        if request_url is None:
            if not self.url_queue:
                return

            # pop() takes the most recently enqueued URL (LIFO order).
            request_url, handler, method, kw = self.url_queue.pop()

        self.fetch_next_url(request_url, handler, method, **kw)

    def fetch_next_url(self, request_url, handler, method, **kw):
        if self.limiter and not self.limiter.acquire(request_url):
            logging.info('Could not acquire limit for url "%s".' % request_url)

            self.url_queue.append((request_url, handler, method, kw))
            deadline = timedelta(seconds=self.limiter.limiter_miss_timeout_ms / 1000.0)
            self.ioloop.add_timeout(deadline, self.get_next_url)
            self.limiter.publish_lock_miss(request_url)
            return False

        logging.debug('Queue has space available for fetching %s.' % request_url)
        self.fetch(request_url, handler, method, **kw)
        return True

    def handle_request(self, url, callback):
        def handle(response):
            logging.debug('Handler called for url %s...' % url)
            self.running_urls -= 1

            response = self.from_tornado_response(url, response)
            logging.info('Got response(%s) from %s.' % (response.status_code, url))

            if self.cache and response and response.status_code < 399:
                logging.debug('Putting %s into cache.' % url)
                self.response_cache.put(url, response)

            if self.limiter:
                self.limiter.release(url)

            try:
                callback(url, response)
            except Exception:
                logging.exception('Error calling callback for %s.' % url)

            if self.running_urls < self.concurrency and self.url_queue:
                self.get_next_url()

            logging.debug('Getting %d urls and still have %d more urls to get...' % (self.running_urls, self.remaining_requests))
            if self.running_urls < 1 and self.remaining_requests == 0:
                logging.debug('Nothing else to get. Stopping Octopus...')
                self.stop()

        return handle

    def handle_wait_timeout(self, signal_number, frames):
        logging.debug('Timeout waiting for IOLoop to finish. Stopping IOLoop manually.')
        self.stop(force=True)

    def wait(self, timeout=10):
        self.last_timeout = timeout
        if not self.url_queue and not self.running_urls:
            logging.debug('No urls to wait for. Returning immediately.')
            return

        if timeout:
            logging.debug('Waiting for urls to be retrieved for %s seconds.' % timeout)
            self.ioloop.set_blocking_signal_threshold(timeout, self.handle_wait_timeout)
        else:
            logging.debug('Waiting for urls to be retrieved.')

        logging.info('Starting IOLoop with %d URLs still left to process.' % self.remaining_requests)
        self.ioloop.start()

    @property
    def remaining_requests(self):
        return len(self.url_queue)

    def stop(self, force=False):
        logging.info('Stopping IOLoop with %d URLs still left to process.' % self.remaining_requests)
        self.ioloop.stop()
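
Typical use of TornadoOctopus is enqueue-then-wait; a short sketch assuming a
reachable URL:

def handle_page(url, response):
    print('%s -> %s' % (url, response.status_code))

otto = TornadoOctopus(concurrency=4, auto_start=True)
otto.enqueue('http://www.google.com', handle_page)
otto.wait(timeout=10)  # runs the IOLoop until every URL finishes or timeout fires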
Example #8
File: process.py  Project: zw0610/swimpy
class SwimpyProcess(multiprocessing.Process):
    def __init__(self, routes, node, pipe, *args, **kwargs):
        super(self.__class__, self).__init__(*args, **kwargs)
        self.routes = routes
        self.node = node
        self.pipe = pipe

        self.bind_addr = node.addr
        self.bind_port = node.port

        # We don't want to initialize these until after we fork
        self.ioloop = None
        self.server = None
        self.app = None

    def _handle_pipe_messages(self, *args, **kwargs):
        message = self.pipe.recv()
        unpacked_message = msgpack.unpackb(message)
        message_type = unpacked_message.pop('type')
        if message_type == Sync.MESSAGE_TYPE:
            self.pipe.send(self.app.nodes)

    def run(self):
        signal.signal(signal.SIGTERM, self.shutdown_sig_handler)
        signal.signal(signal.SIGINT, self.shutdown_sig_handler)

        from tornado.ioloop import IOLoop
        self.ioloop = IOLoop.current()  # get a reference to the IOLoop post-fork

        LOGGER.info('Starting server on tcp://{}:{}'.format(self.bind_addr, self.bind_port))
        self.app = Application(routes=self.routes, node=self.node, pipe=self.pipe)
        self.server = Server(message_handler=self.app.route_stream_message)
        self.server.listen(self.bind_port, address=self.bind_addr)

        self.ioloop.add_handler(self.pipe, self._handle_pipe_messages, self.ioloop.READ)
        self.ioloop.spawn_callback(self.app.send_buffered_gossip)

        # Ping a random node every PING_INTERVAL seconds, +/- 10%.
        # This jitter should reduce the average aggregate peak network
        # throughput when running with large cluster sizes.
        PeriodicCallbackWithSplay(self.app.ping_random_node,
                                  PING_INTERVAL * 1000,
                                  splay_pct=10).start()

        LOGGER.info('Starting ioloop')
        self.ioloop.start()

    def stop(self, shutdown_timeout=SHUTDOWN_TIMEOUT, graceful=True):
        """
        Trigger a graceful stop of the server and the server's ioloop, allowing in-flight
        connections to finish

        Fall back to a "less graceful" stop when stop is reached
        """
        def poll_stop():
            # Tornado uses a "waker" handler internally that we'd like to ignore here
            remaining_handlers = {
                k: v
                for k, v in self.ioloop._handlers.items()
                if k != self.ioloop._waker.fileno()
            }

            # Poll for any remaining connections
            remaining_count = len(remaining_handlers)

            # Once all handlers have been removed, we can stop safely
            if remaining_count == 0:
                LOGGER.info('All finished! Graceful stop complete')
                self.ioloop.stop()
            else:
                LOGGER.info('Waiting on IO handlers ({} remaining). '
                            'Handlers: {!r}'.format(remaining_count,
                                                    remaining_handlers))

        self.ioloop.remove_handler(self.pipe)
        self.server.shutdown()
        self.server.stop()

        if graceful:
            # Poll the IOLoop's handlers until they all shut down.
            poller = PeriodicCallback(poll_stop, 500, io_loop=self.ioloop)
            poller.start()
            self.ioloop.add_timeout(self.ioloop.time() + shutdown_timeout,
                                    self.ioloop.stop)
        else:
            self.ioloop.stop()

    def shutdown_sig_handler(self, sig, frame):
        """
        Signal handler for "stop" signals (SIGTERM, SIGINT)
        """
        LOGGER.warning('{!r} caught signal {!r}! Shutting down'.format(self, sig))

        try:
            self.ioloop.add_callback_from_signal(self.stop)
        except Exception as e:
            LOGGER.error(
                'Encountered exception while shutting down: {}'.format(e)
            )
            sys.exit(1)
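
On the parent side the pipe doubles as a control channel; a hedged sketch of
launching the process and issuing a Sync request (routes and node are assumed
to come from the surrounding project):

import multiprocessing

import msgpack

parent_conn, child_conn = multiprocessing.Pipe()
proc = SwimpyProcess(routes=routes, node=node, pipe=child_conn)
proc.start()

# Ask the child for its current view of the cluster membership.
parent_conn.send(msgpack.packb({'type': Sync.MESSAGE_TYPE}))
nodes = parent_conn.recv()

proc.terminate()  # delivers SIGTERM, which the child traps for a graceful stop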