class MalcolmServerConnect:
    def __init__(self, socket):
        self.socket = socket
        self.request = None
        self.response = None
        self._loop = IOLoop()

    @gen.coroutine
    def message_coroutine(self):
        conn = yield websocket_connect("ws://localhost:%s/ws" % self.socket)
        msg = json.dumps(self.request)
        conn.write_message(msg)
        resp = yield conn.read_message()
        self.response = json.loads(resp, object_pairs_hook=OrderedDict)
        conn.close()

    def send_subscribe(self, path, delta):
        self.request = OrderedDict()
        self.request['typeid'] = "malcolm:core/Subscribe:1.0"
        self.request['id'] = "CANNED"
        self.request['path'] = path
        self.request['delta'] = delta
        self._loop.run_sync(self.message_coroutine)
        if delta:
            for change in self.response['changes']:
                if len(change) == 2 and isinstance(change[1], dict) and \
                        "timeStamp" in change[1]:
                    for endpoint in ("secondsPastEpoch", "nanoseconds"):
                        change[1]['timeStamp'][endpoint] = 0

    def subscribe_request_response(self, path, delta=True):
        self.send_subscribe(path, delta)
        request_msg = json.dumps(self.request, indent=2)
        response_msg = json.dumps(self.response, indent=2)
        return request_msg, response_msg
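
A minimal usage sketch for the class above, assuming a Malcolm websocket server is listening locally (the port number and subscription path here are hypothetical):

conn = MalcolmServerConnect(8008)  # hypothetical server port
request_msg, response_msg = conn.subscribe_request_response(["hello", "meta"])
print(request_msg)
print(response_msg)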
Example #2
def scheduler(cport, **kwargs):
    q = Queue()

    proc = Process(target=run_scheduler, args=(q, cport), kwargs=kwargs)
    proc.daemon = True
    proc.start()

    sport = q.get()

    s = rpc(ip='127.0.0.1', port=sport)
    loop = IOLoop()
    start = time()
    while True:
        ncores = loop.run_sync(s.ncores)
        if ncores:
            break
        if time() - start > 5:
            raise Exception("Timeout on cluster creation")
        sleep(0.01)

    try:
        yield sport
    finally:
        loop = IOLoop()
        with ignoring(socket.error, TimeoutError, StreamClosedError):
            loop.run_sync(lambda: disconnect('127.0.0.1', sport), timeout=0.5)
        proc.terminate()
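
The readiness loop above (run a short RPC coroutine synchronously, retry until it reports cores, give up after five seconds) is a general poll-with-deadline pattern. A standalone sketch of the same idea; the helper name and its arguments are illustrative, not part of distributed:

from time import sleep, time

from tornado.ioloop import IOLoop


def wait_until(check, timeout=5, interval=0.01):
    """Run the coroutine `check` on a private loop until it returns a
    truthy value, raising if `timeout` seconds pass first."""
    loop = IOLoop()
    start = time()
    try:
        while True:
            result = loop.run_sync(check)
            if result:
                return result
            if time() - start > timeout:
                raise TimeoutError("condition not met within %s s" % timeout)
            sleep(interval)
    finally:
        loop.close()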
Example #3
File: test_tasks.py Project: 1-Hash/Toto
def test_instance_pool(self):
  instance1 = _Instance()
  instance2 = _Instance()
  pool = InstancePool([instance1, instance2])
  pool.increment()
  pool.increment()
  self.assertEquals(instance1.value(), 1)
  self.assertEquals(instance2.value(), 1)
  pool.transaction(lambda i: i.increment())
  pool.transaction(lambda i: i.increment())
  self.assertEquals(instance1.value(), 2)
  self.assertEquals(instance2.value(), 2)
  @coroutine
  def yield_tasks():
    self.assertEquals((yield pool.await().increment()), 3)
    self.assertEquals((yield pool.await().increment()), 3)
    self.assertEquals(instance1.value(), 3)
    self.assertEquals(instance2.value(), 3)
    self.assertEquals((yield pool.await_transaction(lambda i: i.increment())), 4)
    self.assertEquals((yield pool.await_transaction(lambda i: i.increment())), 4)
  loop = IOLoop()
  loop.make_current()
  loop.run_sync(yield_tasks)
  self.assertEquals(instance1.value(), 4)
  self.assertEquals(instance2.value(), 4)
Example #4
class SyncHTTPClientTest(unittest.TestCase):
    def setUp(self):
        self.server_ioloop = IOLoop()
        event = threading.Event()

        @gen.coroutine
        def init_server():
            sock, self.port = bind_unused_port()
            app = Application([("/", HelloWorldHandler)])
            self.server = HTTPServer(app)
            self.server.add_socket(sock)
            event.set()

        def start():
            self.server_ioloop.run_sync(init_server)
            self.server_ioloop.start()

        self.server_thread = threading.Thread(target=start)
        self.server_thread.start()
        event.wait()

        self.http_client = HTTPClient()

    def tearDown(self):
        def stop_server():
            self.server.stop()
            # Delay the shutdown of the IOLoop by several iterations because
            # the server may still have some cleanup work left when
            # the client finishes with the response (this is noticeable
            # with http/2, which leaves a Future with an unexamined
            # StreamClosedError on the loop).

            @gen.coroutine
            def slow_stop():
                # The number of iterations is difficult to predict. Typically,
                # one is sufficient, although sometimes it needs more.
                for i in range(5):
                    yield
                self.server_ioloop.stop()

            self.server_ioloop.add_callback(slow_stop)

        self.server_ioloop.add_callback(stop_server)
        self.server_thread.join()
        self.http_client.close()
        self.server_ioloop.close(all_fds=True)

    def get_url(self, path):
        return "http://127.0.0.1:%d%s" % (self.port, path)

    def test_sync_client(self):
        response = self.http_client.fetch(self.get_url("/"))
        self.assertEqual(b"Hello world!", response.body)

    def test_sync_client_error(self):
        # Synchronous HTTPClient raises errors directly; no need for
        # response.rethrow()
        with self.assertRaises(HTTPError) as assertion:
            self.http_client.fetch(self.get_url("/notfound"))
        self.assertEqual(assertion.exception.code, 404)
Example #5
def cluster(nworkers=2, nanny=False, worker_kwargs={}):
    if nanny:
        _run_worker = run_nanny
    else:
        _run_worker = run_worker
    scheduler_q = Queue()
    scheduler = Process(target=run_scheduler, args=(scheduler_q,))
    scheduler.daemon = True
    scheduler.start()
    sport = scheduler_q.get()

    workers = []
    for i in range(nworkers):
        q = Queue()
        fn = '_test_worker-%s' % uuid.uuid1()
        proc = Process(target=_run_worker, args=(q, sport),
                       kwargs=merge({'ncores': 1, 'local_dir': fn},
                                    worker_kwargs))
        workers.append({'proc': proc, 'queue': q, 'dir': fn})

    for worker in workers:
        worker['proc'].start()

    for worker in workers:
        worker['port'] = worker['queue'].get()

    loop = IOLoop()
    s = rpc(ip='127.0.0.1', port=sport)
    start = time()
    try:
        while True:
            ncores = loop.run_sync(s.ncores)
            if len(ncores) == nworkers:
                break
            if time() - start > 5:
                raise Exception("Timeout on cluster creation")

        yield {'proc': scheduler, 'port': sport}, workers
    finally:
        logger.debug("Closing out test cluster")
        with ignoring(socket.error, TimeoutError, StreamClosedError):
            loop.run_sync(lambda: disconnect('127.0.0.1', sport), timeout=0.5)
        scheduler.terminate()
        scheduler.join(timeout=2)

        for port in [w['port'] for w in workers]:
            with ignoring(socket.error, TimeoutError, StreamClosedError):
                loop.run_sync(lambda: disconnect('127.0.0.1', port),
                              timeout=0.5)
        for proc in [w['proc'] for w in workers]:
            with ignoring(Exception):
                proc.terminate()
                proc.join(timeout=2)
        for q in [w['queue'] for w in workers]:
            q.close()
        for fn in glob('_test_worker-*'):
            shutil.rmtree(fn)
        loop.close(all_fds=True)
Example #6
def atexit(self):
    """atexit callback"""
    if self._atexit_ran:
        return
    self._atexit_ran = True
    # run the cleanup step (in a new loop, because the interrupted one is unclean)
    IOLoop.clear_current()
    loop = IOLoop()
    loop.make_current()
    loop.run_sync(self.cleanup)
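
A self-contained sketch of how a callback like this is usually wired up with the standard atexit module; the App class and its cleanup body are hypothetical:

import atexit

from tornado import gen
from tornado.ioloop import IOLoop


class App(object):
    def __init__(self):
        self._atexit_ran = False
        atexit.register(self.atexit)  # run cleanup on interpreter exit

    @gen.coroutine
    def cleanup(self):
        yield gen.moment  # hypothetical teardown work goes here

    def atexit(self):
        if self._atexit_ran:
            return
        self._atexit_ran = True
        # a fresh loop, since the interrupted one may be in a bad state
        IOLoop.clear_current()
        loop = IOLoop()
        loop.make_current()
        loop.run_sync(self.cleanup)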
Example #7
File: tornado.py Project: rbtr/bokeh
    def _atexit(self):
        if self._atexit_ran:
            return
        self._atexit_ran = True

        self._stats_job.stop()
        IOLoop.clear_current()
        loop = IOLoop()
        loop.make_current()
        loop.run_sync(self._cleanup)
Example #8
def run_nanny(port, center_port, **kwargs):
    from distributed import Nanny
    from tornado.ioloop import IOLoop, PeriodicCallback
    import logging
    IOLoop.clear_instance()
    loop = IOLoop()
    loop.make_current()
    PeriodicCallback(lambda: None, 500).start()
    logging.getLogger("tornado").setLevel(logging.CRITICAL)
    worker = Nanny('127.0.0.1', port, port + 1000, '127.0.0.1', center_port, **kwargs)
    loop.run_sync(worker._start)
    loop.start()
Example #9
        def test_func():
            IOLoop.clear_instance()
            loop = IOLoop()
            loop.make_current()

            cor = gen.coroutine(func)
            try:
                loop.run_sync(cor, timeout=timeout)
            finally:
                loop.stop()
                loop.close(all_fds=True)
Example #10
        def test_func():
            IOLoop.clear_instance()
            loop = IOLoop()
            loop.make_current()

            s, workers = loop.run_sync(lambda: start_cluster(ncores))
            try:
                loop.run_sync(lambda: cor(s, *workers), timeout=timeout)
            finally:
                loop.run_sync(lambda: end_cluster(s, workers))
                loop.stop()
                loop.close()
Example #11
class HTTPClient(object):
    """A blocking HTTP client.

    This interface is provided for convenience and testing; most applications
    that are running an IOLoop will want to use `AsyncHTTPClient` instead.
    Typical usage looks like this::

        http_client = httpclient.HTTPClient()
        try:
            response = http_client.fetch("http://www.google.com/")
            print(response.body)
        except httpclient.HTTPError as e:
            # HTTPError is raised for non-200 responses; the response
            # can be found in e.response.
            print("Error: " + str(e))
        except Exception as e:
            # Other errors are possible, such as IOError.
            print("Error: " + str(e))
        http_client.close()
    """
    def __init__(self, async_client_class=None, **kwargs):
        self._io_loop = IOLoop(make_current=False)
        if async_client_class is None:
            async_client_class = AsyncHTTPClient
        # Create the client while our IOLoop is "current", without
        # clobbering the thread's real current IOLoop (if any).
        self._async_client = self._io_loop.run_sync(
            gen.coroutine(lambda: async_client_class(**kwargs)))
        self._closed = False

    def __del__(self):
        self.close()

    def close(self):
        """Closes the HTTPClient, freeing any resources used."""
        if not self._closed:
            self._async_client.close()
            self._io_loop.close()
            self._closed = True

    def fetch(self, request, **kwargs):
        """Executes a request, returning an `HTTPResponse`.

        The request may be either a string URL or an `HTTPRequest` object.
        If it is a string, we construct an `HTTPRequest` using any additional
        kwargs: ``HTTPRequest(request, **kwargs)``

        If an error occurs during the fetch, we raise an `HTTPError` unless
        the ``raise_error`` keyword argument is set to False.
        """
        response = self._io_loop.run_sync(functools.partial(
            self._async_client.fetch, request, **kwargs))
        return response
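
Per the fetch docstring above, passing raise_error=False suppresses the HTTPError that would otherwise be raised for an error response. A short sketch; the URL is illustrative and assumes something is listening there:

client = HTTPClient()
try:
    resp = client.fetch("http://127.0.0.1:8888/missing", raise_error=False)
    print(resp.code)  # e.g. 404, instead of a raised HTTPError
finally:
    client.close()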
Example #12
def run_worker(q, center_port, **kwargs):
    from distributed import Worker
    from tornado.ioloop import IOLoop, PeriodicCallback
    import logging
    with log_errors():
        IOLoop.clear_instance()
        loop = IOLoop()
        loop.make_current()
        PeriodicCallback(lambda: None, 500).start()
        logging.getLogger("tornado").setLevel(logging.CRITICAL)
        worker = Worker('127.0.0.1', center_port, ip='127.0.0.1', **kwargs)
        loop.run_sync(lambda: worker._start(0))
        q.put(worker.port)
        loop.start()
Example #13
class SyncHTTPClientTest(unittest.TestCase):
    def setUp(self):
        if IOLoop.configured_class().__name__ == 'TwistedIOLoop':
            # TwistedIOLoop only supports the global reactor, so we can't have
            # separate IOLoops for client and server threads.
            raise unittest.SkipTest(
                'Sync HTTPClient not compatible with TwistedIOLoop')
        self.server_ioloop = IOLoop()

        @gen.coroutine
        def init_server():
            sock, self.port = bind_unused_port()
            app = Application([('/', HelloWorldHandler)])
            self.server = HTTPServer(app)
            self.server.add_socket(sock)
        self.server_ioloop.run_sync(init_server)

        self.server_thread = threading.Thread(target=self.server_ioloop.start)
        self.server_thread.start()

        self.http_client = HTTPClient()

    def tearDown(self):
        def stop_server():
            self.server.stop()
            # Delay the shutdown of the IOLoop by one iteration because
            # the server may still have some cleanup work left when
            # the client finishes with the response (this is noticeable
            # with http/2, which leaves a Future with an unexamined
            # StreamClosedError on the loop).
            self.server_ioloop.add_callback(self.server_ioloop.stop)
        self.server_ioloop.add_callback(stop_server)
        self.server_thread.join()
        self.http_client.close()
        self.server_ioloop.close(all_fds=True)

    def get_url(self, path):
        return 'http://127.0.0.1:%d%s' % (self.port, path)

    def test_sync_client(self):
        response = self.http_client.fetch(self.get_url('/'))
        self.assertEqual(b'Hello world!', response.body)

    def test_sync_client_error(self):
        # Synchronous HTTPClient raises errors directly; no need for
        # response.rethrow()
        with self.assertRaises(HTTPError) as assertion:
            self.http_client.fetch(self.get_url('/notfound'))
        self.assertEqual(assertion.exception.code, 404)
Example #14
def run_worker(q, scheduler_port, **kwargs):
    from distributed import Worker
    from tornado.ioloop import IOLoop, PeriodicCallback
    with log_errors():
        IOLoop.clear_instance()
        loop = IOLoop()
        loop.make_current()
        PeriodicCallback(lambda: None, 500).start()
        worker = Worker('127.0.0.1', scheduler_port, ip='127.0.0.1',
                        loop=loop, validate=True, **kwargs)
        loop.run_sync(lambda: worker._start(0))
        q.put(worker.port)
        try:
            loop.start()
        finally:
            loop.close(all_fds=True)
Example #15
File: runner.py Project: minrk/chpbench
def single_run_ws(url, delay=0, size=0, msgs=1):
    """Time a single websocket run"""
    buf = hexlify(os.urandom(size // 2)).decode('ascii')
    msg = json.dumps({'delay': delay, 'data': buf})

    async def go():
        ws = await websocket_connect(url.replace('http', 'ws') + '/ws')
        for i in range(msgs):
            ws.write_message(msg)
            await ws.read_message()

    asyncio.set_event_loop(asyncio.new_event_loop())
    IOLoop.clear_current()
    loop = IOLoop(make_current=True)
    loop.run_sync(go)
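
A sketch of a caller timing the benchmark above; the URL and message parameters are illustrative:

from time import perf_counter

start = perf_counter()
single_run_ws('http://127.0.0.1:8000', delay=0, size=1024, msgs=10)
print('took %.3f s' % (perf_counter() - start))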
Example #16
File: test_tasks.py Project: 1-Hash/Toto
def test_awaitable(self):
  instance = _Instance()
  instance.increment()
  self.assertEquals(instance.value(), 1)
  awaitable = AwaitableInstance(instance)
  @coroutine
  def yield_tasks():
    self.assertEquals((yield awaitable.increment()), 2)
    self.assertEquals((yield awaitable.increment()), 3)
    self.assertEquals((yield awaitable.increment()), 4)
    self.assertEquals((yield awaitable.value()), 4)
  loop = IOLoop()
  loop.make_current()
  loop.run_sync(yield_tasks)
  self.assertEquals(instance.value(), 4)
Example #17
    def _execute_cmd(self, host, command, params):
        url = self.START_URL_TPL.format(host=self._wrap_host(host), port=self.minion_port)
        data = self._update_query_parameters(
            dst={'command': command},
            src=params,
        )

        io_loop = IOLoop()
        client = SimpleAsyncHTTPClient(io_loop)
        logger.debug(
            'ioloop for single async http request for host {} started: {}'.format(
                host, command))
        response = io_loop.run_sync(functools.partial(
            client.fetch, url, method='POST',
            headers=self.minion_headers,
            body=urllib.urlencode(data, doseq=True),
            request_timeout=MINIONS_CFG.get('request_timeout', 5.0),
            allow_ipv6=True,
            use_gzip=True))
        logger.debug(
            'ioloop for single async http request for host {} finished: {}'.format(
                host, command))

        try:
            data = self._process_state(host, self._get_response(host, response), sync=True)
        except HTTPError:
            logger.exception('Execute cmd raised http error')
            raise
        return data
Example #18
def run_nanny(q, center_port, **kwargs):
    from distributed import Nanny
    from tornado.ioloop import IOLoop, PeriodicCallback
    import logging
    with log_errors():
        IOLoop.clear_instance()
        loop = IOLoop()
        loop.make_current()
        PeriodicCallback(lambda: None, 500).start()
        logging.getLogger("tornado").setLevel(logging.CRITICAL)
        worker = Nanny('127.0.0.1', center_port, ip='127.0.0.1', loop=loop, **kwargs)
        loop.run_sync(lambda: worker._start(0))
        q.put(worker.port)
        try:
            loop.start()
        finally:
            loop.run_sync(worker._close)
            loop.close(all_fds=True)
Example #19
def websocket(self, id):
    loop = IOLoop()
    req = HTTPRequest(
        url_path_join(self.base_url.replace('http', 'ws', 1), 'api/kernels', id, 'channels'),
        headers=self.headers,
    )
    f = websocket_connect(req, io_loop=loop)
    return loop.run_sync(lambda: f)
Example #20
File: test_tasks.py Project: 1-Hash/Toto
def test_yield_task(self):
  queue = TaskQueue()
  task_results = []
  @coroutine
  def yield_tasks():
    task = lambda x: x
    futures = []
    futures.append(queue.yield_task(task, 1))
    futures.append(queue.yield_task(task, 2))
    futures.append(queue.yield_task(task, 3))
    res = yield futures
    task_results[:] = res
  loop = IOLoop()
  loop.make_current()
  loop.run_sync(yield_tasks)
  self.assertEquals(len(task_results), 3)
  self.assertEquals(task_results, [1, 2, 3])
Example #21
class ProxyTestServer(object):

    def __init__(self):
        self.server_ioloop = IOLoop()
        self.access_count = 0

        @tornado.gen.coroutine
        def init_server():
            sock, self.port = bind_unused_port()
            app = Application([('/(.*)', ProxyTestHandler, dict(server=self))])
            self.server = HTTPServer(app)
            self.server.add_socket(sock)
        self.server_ioloop.run_sync(init_server)

        self.server_thread = threading.Thread(target=self.server_ioloop.start)

    def start(self):
        self.server_thread.start()

    def stop(self):
        def stop_server():
            self.server.stop()

            @tornado.gen.coroutine
            def slow_stop():
                for i in range(5):
                    yield
                self.server_ioloop.stop()

            self.server_ioloop.add_callback(slow_stop)

        self.server_ioloop.add_callback(stop_server)
        self.server_thread.join()
        self.server_ioloop.close(all_fds=True)

    def get_access_count(self):
        return self.access_count

    def clear_access_count(self):
        self.access_count = 0
Example #22
def cluster(nworkers=2, nanny=False):
    if nanny:
        _run_worker = run_nanny
    else:
        _run_worker = run_worker
    _port[0] += 1
    cport = _port[0]
    center = Process(target=run_center, args=(cport,))
    workers = []
    for i in range(nworkers):
        _port[0] += 1
        port = _port[0]
        proc = Process(target=_run_worker, args=(port, cport),
                       kwargs={'ncores': 1, 'local_dir': '_test_worker-%d' % port})
        workers.append({'port': port, 'proc': proc})

    center.start()
    for worker in workers:
        worker['proc'].start()

    sock = connect_sync('127.0.0.1', cport)
    start = time()
    try:
        while True:
            write_sync(sock, {'op': 'ncores'})
            ncores = read_sync(sock)
            if len(ncores) == nworkers:
                break
            if time() - start > 5:
                raise Exception("Timeout on cluster creation")

        yield {'proc': center, 'port': cport}, workers
    finally:
        loop = IOLoop()
        logger.debug("Closing out test cluster")
        for port in [cport] + [w['port'] for w in workers]:
            with ignoring(socket.error, TimeoutError, StreamClosedError):
                loop.run_sync(lambda: disconnect('127.0.0.1', port), timeout=10)
        for proc in [center] + [w['proc'] for w in workers]:
            with ignoring(Exception):
                proc.terminate()
        for fn in glob('_test_worker-*'):
            shutil.rmtree(fn)
Example #23
def run_scheduler(q, center_port=None, **kwargs):
    from distributed import Scheduler
    from tornado.ioloop import IOLoop, PeriodicCallback
    import logging
    IOLoop.clear_instance()
    loop = IOLoop()
    loop.make_current()
    PeriodicCallback(lambda: None, 500).start()
    logging.getLogger("tornado").setLevel(logging.CRITICAL)

    center = ('127.0.0.1', center_port) if center_port else None
    scheduler = Scheduler(center=center, **kwargs)
    scheduler.listen(0)

    if center_port:
        loop.run_sync(scheduler.sync_center)
    done = scheduler.start(0)

    q.put(scheduler.port)
    loop.start()
Example #24
File: test_tasks.py Project: 1-Hash/Toto
def test_yield_task_exception(self):
  queue = TaskQueue()
  task_results = []
  @coroutine
  def yield_tasks():
    def task(x):
      raise Exception('failure')
    futures = []
    futures.append(queue.yield_task(task, 1))
    futures.append(queue.yield_task(task, 2))
    futures.append(queue.yield_task(task, 3))
    for f in futures:
      try:
        yield f
      except Exception as e:
        task_results.append(e)
  loop = IOLoop()
  loop.make_current()
  loop.run_sync(yield_tasks)
  self.assertEquals(len(task_results), 3)
  for e in task_results:
    self.assertEquals(e.message, 'failure')
Example #25
File: nanny.py Project: dask/distributed
def run_worker_fork(q, ip, scheduler_ip, scheduler_port, ncores, nanny_port,
        worker_port, local_dir, services, name, memory_limit, reconnect,
        resources, validate):
    """ Function run by the Nanny when creating the worker """
    from distributed import Worker  # pragma: no cover
    from tornado.ioloop import IOLoop  # pragma: no cover
    IOLoop.clear_instance()  # pragma: no cover
    loop = IOLoop()  # pragma: no cover
    loop.make_current()  # pragma: no cover
    worker = Worker(scheduler_ip, scheduler_port, ncores=ncores, ip=ip,
                    service_ports={'nanny': nanny_port}, local_dir=local_dir,
                    services=services, name=name, memory_limit=memory_limit,
                    reconnect=reconnect, validate=validate,
                    resources=resources, loop=loop)  # pragma: no cover

    @gen.coroutine  # pragma: no cover
    def run():
        try:  # pragma: no cover
            yield worker._start(worker_port)  # pragma: no cover
        except Exception as e:  # pragma: no cover
            logger.exception(e)  # pragma: no cover
            q.put(e)  # pragma: no cover
        else:
            assert worker.port  # pragma: no cover
            q.put({'port': worker.port, 'dir': worker.local_dir})  # pragma: no cover

        while worker.status != 'closed':
            yield gen.sleep(0.1)

        logger.info("Worker closed")

    try:
        loop.run_sync(run)
    finally:
        loop.stop()
        loop.close(all_fds=True)
Example #26
        def test_func():
            IOLoop.clear_instance()
            loop = IOLoop()
            loop.make_current()

            s, workers = loop.run_sync(lambda: start_cluster(ncores,
                                                             Worker=Worker))
            args = [s] + workers

            if executor:
                e = Executor((s.ip, s.port), loop=loop, start=False)
                loop.run_sync(e._start)
                args = [e] + args
            try:
                loop.run_sync(lambda: cor(*args), timeout=timeout)
            finally:
                if executor:
                    loop.run_sync(e._shutdown)
                loop.run_sync(lambda: end_cluster(s, workers))
                loop.stop()
                loop.close(all_fds=True)
Example #27
    def _terminate_cmd(self, host, uid):
        url = self.TERMINATE_URL_TPL.format(host=host, port=self.minion_port)
        data = {'cmd_uid': uid}

        io_loop = IOLoop()
        client = SimpleAsyncHTTPClient(io_loop)
        response = io_loop.run_sync(functools.partial(
            client.fetch, url, method='POST',
            headers=self.minion_headers,
            body=urllib.urlencode(data),
            request_timeout=MINIONS_CFG.get('request_timeout', 5.0),
            allow_ipv6=True,
            use_gzip=True))

        data = self._process_state(host, self._get_response(host, response), sync=True)
        return data
Example #28
class HTTPClient(object):
    """A blocking HTTP client.

    This interface is provided for convenience and testing; most applications
    that are running an IOLoop will want to use `AsyncHTTPClient` instead.
    Typical usage looks like this::

        http_client = httpclient.HTTPClient()
        try:
            response = http_client.fetch("http://www.google.com/")
            print response.body
        except httpclient.HTTPError as e:
            print "Error:", e
        http_client.close()
    """
    def __init__(self, async_client_class=None, **kwargs):
        self._io_loop = IOLoop()
        if async_client_class is None:
            async_client_class = AsyncHTTPClient
        self._async_client = async_client_class(self._io_loop, **kwargs)
        self._closed = False

    def __del__(self):
        self.close()

    def close(self):
        """Closes the HTTPClient, freeing any resources used."""
        if not self._closed:
            self._async_client.close()
            self._io_loop.close()
            self._closed = True

    def fetch(self, request, **kwargs):
        """Executes a request, returning an `HTTPResponse`.

        The request may be either a string URL or an `HTTPRequest` object.
        If it is a string, we construct an `HTTPRequest` using any additional
        kwargs: ``HTTPRequest(request, **kwargs)``

        If an error occurs during the fetch, we raise an `HTTPError`.
        """
        response = self._io_loop.run_sync(functools.partial(
            self._async_client.fetch, request, **kwargs))
        response.rethrow()
        return response
Example #29
    def _execute_cmd(self, host, command, params):
        url = self.START_URL_TPL.format(host=host, port=self.minion_port)
        data = {'command': command}

        params_rename = {
            'success_codes': 'success_code'
        }

        def check_param(key, val):
            if not isinstance(val, (int, long, basestring)):
                logger.warn('Invalid parameter {0}, value {1}'.format(key, val))
                raise ValueError('Only strings and integers are accepted as command parameters')

        for k, v in params.iteritems():
            if k == 'command':
                raise ValueError('Parameter "command" is not accepted as command parameter')

            if isinstance(v, (list, tuple)):
                for val in v:
                    check_param(k, val)
            else:
                check_param(k, v)
            data[params_rename.get(k, k)] = v

        io_loop = IOLoop()
        client = SimpleAsyncHTTPClient(io_loop)
        logger.debug(
            'ioloop for single async http request for host {} started: {}'.format(
                host, command))
        response = io_loop.run_sync(functools.partial(
            client.fetch, url, method='POST',
            headers=self.minion_headers,
            body=urllib.urlencode(data, doseq=True),
            request_timeout=MINIONS_CFG.get('request_timeout', 5.0),
            allow_ipv6=True,
            use_gzip=True))
        logger.debug(
            'ioloop for single async http request for host {} finished: {}'.format(
                host, command))

        try:
            data = self._process_state(host, self._get_response(host, response), sync=True)
        except HTTPError:
            logger.exception('Execute cmd raised http error')
            raise
        return data
Example #30
class TestIOLoopRunSync(unittest.TestCase):
    def setUp(self):
        self.io_loop = IOLoop()

    def tearDown(self):
        self.io_loop.close()

    def test_sync_result(self):
        self.assertEqual(self.io_loop.run_sync(lambda: 42), 42)

    def test_sync_exception(self):
        with self.assertRaises(ZeroDivisionError):
            self.io_loop.run_sync(lambda: 1 / 0)

    def test_async_result(self):
        @gen.coroutine
        def f():
            yield gen.Task(self.io_loop.add_callback)
            raise gen.Return(42)

        self.assertEqual(self.io_loop.run_sync(f), 42)

    def test_async_exception(self):
        @gen.coroutine
        def f():
            yield gen.Task(self.io_loop.add_callback)
            1 / 0

        with self.assertRaises(ZeroDivisionError):
            self.io_loop.run_sync(f)

    def test_current(self):
        def f():
            self.assertIs(IOLoop.current(), self.io_loop)

        self.io_loop.run_sync(f)

    def test_timeout(self):
        @gen.coroutine
        def f():
            yield gen.Task(self.io_loop.add_timeout, self.io_loop.time() + 1)

        self.assertRaises(TimeoutError, self.io_loop.run_sync, f, timeout=0.01)
Example #31
class TestIOLoopRunSync(unittest.TestCase):
    def setUp(self):
        self.io_loop = IOLoop()

    def tearDown(self):
        self.io_loop.close()

    def test_sync_result(self):
        with self.assertRaises(gen.BadYieldError):
            self.io_loop.run_sync(lambda: 42)

    def test_sync_exception(self):
        with self.assertRaises(ZeroDivisionError):
            self.io_loop.run_sync(lambda: 1 / 0)

    def test_async_result(self):
        @gen.coroutine
        def f():
            yield gen.moment
            raise gen.Return(42)
        self.assertEqual(self.io_loop.run_sync(f), 42)

    def test_async_exception(self):
        @gen.coroutine
        def f():
            yield gen.moment
            1 / 0
        with self.assertRaises(ZeroDivisionError):
            self.io_loop.run_sync(f)

    def test_current(self):
        def f():
            self.assertIs(IOLoop.current(), self.io_loop)
        self.io_loop.run_sync(f)

    def test_timeout(self):
        @gen.coroutine
        def f():
            yield gen.sleep(1)
        self.assertRaises(TimeoutError, self.io_loop.run_sync, f, timeout=0.01)

    @skipBefore35
    def test_native_coroutine(self):
        @gen.coroutine
        def f1():
            yield gen.moment

        namespace = exec_test(globals(), locals(), """
        async def f2():
            await f1()
        """)
        self.io_loop.run_sync(namespace['f2'])
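
The semantics these tests pin down, restated outside a test class as a quick sketch: run_sync drives a coroutine to completion on a private loop and hands back its result (or re-raises its exception):

from tornado import gen
from tornado.ioloop import IOLoop

loop = IOLoop()

@gen.coroutine
def answer():
    yield gen.moment      # let the loop turn once
    raise gen.Return(42)  # becomes run_sync's return value

print(loop.run_sync(answer))  # 42
print(loop.run_sync(answer))  # the same loop can be reused
loop.close()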
Example #32
def test_open_sync(self):
    loop = IOLoop()
    cx = loop.run_sync(self.motor_client(io_loop=loop).open)
    self.assertTrue(isinstance(cx, motor.MotorClient))
Example #33
    def _run(cls, worker_args, worker_kwargs, worker_start_args, silence_logs,
             init_result_q, child_stop_q):  # pragma: no cover
        from distributed import Worker

        try:
            from dask.multiprocessing import initialize_worker_process
        except ImportError:  # old Dask version
            pass
        else:
            initialize_worker_process()

        if silence_logs:
            logger.setLevel(silence_logs)

        IOLoop.clear_instance()
        loop = IOLoop()
        loop.make_current()
        worker = Worker(*worker_args, **worker_kwargs)

        @gen.coroutine
        def do_stop(timeout):
            try:
                yield worker._close(report=False, nanny=False)
            finally:
                loop.stop()

        def watch_stop_q():
            """
            Wait for an incoming stop message and then stop the
            worker cleanly.
            """
            while True:
                try:
                    msg = child_stop_q.get(timeout=1000)
                except Empty:
                    pass
                else:
                    assert msg['op'] == 'stop'
                    loop.add_callback(do_stop, msg['timeout'])
                    break

        t = threading.Thread(target=watch_stop_q,
                             name="Nanny stop queue watch")
        t.daemon = True
        t.start()

        @gen.coroutine
        def run():
            """
            Try to start worker and inform parent of outcome.
            """
            try:
                yield worker._start(*worker_start_args)
            except Exception as e:
                logger.exception("Failed to start worker")
                init_result_q.put(e)
            else:
                assert worker.address
                init_result_q.put({
                    'address': worker.address,
                    'dir': worker.local_dir
                })
                yield worker.wait_until_closed()
                logger.info("Worker closed")

        try:
            loop.run_sync(run)
        except TimeoutError:
            # Loop was stopped before wait_until_closed() returned, ignore
            pass
        except KeyboardInterrupt:
            pass
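
The watch_stop_q pattern above in isolation: a helper thread blocks on a queue, and its only thread-safe way to touch the loop is add_callback. A sketch with an illustrative message shape:

import threading
from queue import Queue

from tornado.ioloop import IOLoop

loop = IOLoop()
stop_q = Queue()

def watcher():
    msg = stop_q.get()            # blocks in the helper thread
    assert msg['op'] == 'stop'
    loop.add_callback(loop.stop)  # hop back onto the loop's thread

threading.Thread(target=watcher, daemon=True).start()
stop_q.put({'op': 'stop'})        # normally sent from another process
loop.start()                      # returns once the watcher fires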
Example #34
class HTTPClient(object):
    """A blocking HTTP client.

    This interface is provided to make it easier to share code between
    synchronous and asynchronous applications. Applications that are
    running an `.IOLoop` must use `AsyncHTTPClient` instead.

    Typical usage looks like this::

        http_client = httpclient.HTTPClient()
        try:
            response = http_client.fetch("http://www.google.com/")
            print(response.body)
        except httpclient.HTTPError as e:
            # HTTPError is raised for non-200 responses; the response
            # can be found in e.response.
            print("Error: " + str(e))
        except Exception as e:
            # Other errors are possible, such as IOError.
            print("Error: " + str(e))
        http_client.close()

    .. versionchanged:: 5.0

       Due to limitations in `asyncio`, it is no longer possible to
       use the synchronous ``HTTPClient`` while an `.IOLoop` is running.
       Use `AsyncHTTPClient` instead.

    """
    def __init__(self,
                 async_client_class: Type["AsyncHTTPClient"] = None,
                 **kwargs: Any) -> None:
        # Initialize self._closed at the beginning of the constructor
        # so that an exception raised here doesn't lead to confusing
        # failures in __del__.
        self._closed = True
        self._io_loop = IOLoop(make_current=False)
        if async_client_class is None:
            async_client_class = AsyncHTTPClient

        # Create the client while our IOLoop is "current", without
        # clobbering the thread's real current IOLoop (if any).
        async def make_client() -> "AsyncHTTPClient":
            await gen.sleep(0)
            assert async_client_class is not None
            return async_client_class(**kwargs)

        self._async_client = self._io_loop.run_sync(make_client)
        self._closed = False

    def __del__(self) -> None:
        self.close()

    def close(self) -> None:
        """Closes the HTTPClient, freeing any resources used."""
        if not self._closed:
            self._async_client.close()
            self._io_loop.close()
            self._closed = True

    def fetch(self, request: Union["HTTPRequest", str],
              **kwargs: Any) -> "HTTPResponse":
        """Executes a request, returning an `HTTPResponse`.

        The request may be either a string URL or an `HTTPRequest` object.
        If it is a string, we construct an `HTTPRequest` using any additional
        kwargs: ``HTTPRequest(request, **kwargs)``

        If an error occurs during the fetch, we raise an `HTTPError` unless
        the ``raise_error`` keyword argument is set to False.
        """
        response = self._io_loop.run_sync(
            functools.partial(self._async_client.fetch, request, **kwargs))
        return response
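
Conversely, per the versionchanged note above, code that is already running inside an event loop should use AsyncHTTPClient directly. A minimal sketch; the URL is illustrative:

import asyncio

from tornado.httpclient import AsyncHTTPClient


async def main():
    client = AsyncHTTPClient()
    response = await client.fetch("http://example.com/")
    print(len(response.body))

asyncio.run(main())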
Example #35
class TestSessionManager(TestCase):
    def setUp(self):
        self.sm = SessionManager(
            kernel_manager=DummyMKM(),
            contents_manager=ContentsManager(),
        )
        self.loop = IOLoop()

    def tearDown(self):
        self.loop.close(all_fds=True)

    def create_sessions(self, *kwarg_list):
        @gen.coroutine
        def co_add():
            sessions = []
            for kwargs in kwarg_list:
                session = yield self.sm.create_session(**kwargs)
                sessions.append(session)
            raise gen.Return(sessions)

        return self.loop.run_sync(co_add)

    def create_session(self, **kwargs):
        return self.create_sessions(kwargs)[0]

    def test_get_session(self):
        sm = self.sm
        session_id = self.create_session(path='/path/to/test.ipynb',
                                         kernel_name='bar')['id']
        model = sm.get_session(session_id=session_id)
        expected = {
            'id': session_id,
            'notebook': {
                'path': u'/path/to/test.ipynb'
            },
            'kernel': {
                'id': u'A',
                'name': 'bar'
            }
        }
        self.assertEqual(model, expected)

    def test_bad_get_session(self):
        # Should raise error if a bad key is passed to the database.
        sm = self.sm
        session_id = self.create_session(path='/path/to/test.ipynb',
                                         kernel_name='foo')['id']
        self.assertRaises(TypeError, sm.get_session,
                          bad_id=session_id)  # Bad keyword

    def test_get_session_dead_kernel(self):
        sm = self.sm
        session = self.create_session(path='/path/to/1/test1.ipynb',
                                      kernel_name='python')
        # kill the kernel
        sm.kernel_manager.shutdown_kernel(session['kernel']['id'])
        with self.assertRaises(KeyError):
            sm.get_session(session_id=session['id'])
        # no sessions left
        listed = sm.list_sessions()
        self.assertEqual(listed, [])

    def test_list_sessions(self):
        sm = self.sm
        sessions = self.create_sessions(
            dict(path='/path/to/1/test1.ipynb', kernel_name='python'),
            dict(path='/path/to/2/test2.ipynb', kernel_name='python'),
            dict(path='/path/to/3/test3.ipynb', kernel_name='python'),
        )

        sessions = sm.list_sessions()
        expected = [{
            'id': sessions[0]['id'],
            'notebook': {
                'path': u'/path/to/1/test1.ipynb'
            },
            'kernel': {
                'id': u'A',
                'name': 'python'
            }
        }, {
            'id': sessions[1]['id'],
            'notebook': {
                'path': u'/path/to/2/test2.ipynb'
            },
            'kernel': {
                'id': u'B',
                'name': 'python'
            }
        }, {
            'id': sessions[2]['id'],
            'notebook': {
                'path': u'/path/to/3/test3.ipynb'
            },
            'kernel': {
                'id': u'C',
                'name': 'python'
            }
        }]
        self.assertEqual(sessions, expected)

    def test_list_sessions_dead_kernel(self):
        sm = self.sm
        sessions = self.create_sessions(
            dict(path='/path/to/1/test1.ipynb', kernel_name='python'),
            dict(path='/path/to/2/test2.ipynb', kernel_name='python'),
        )
        # kill one of the kernels
        sm.kernel_manager.shutdown_kernel(sessions[0]['kernel']['id'])
        listed = sm.list_sessions()
        expected = [{
            'id': sessions[1]['id'],
            'notebook': {
                'path': u'/path/to/2/test2.ipynb',
            },
            'kernel': {
                'id': u'B',
                'name': 'python',
            }
        }]
        self.assertEqual(listed, expected)

    def test_update_session(self):
        sm = self.sm
        session_id = self.create_session(path='/path/to/test.ipynb',
                                         kernel_name='julia')['id']
        sm.update_session(session_id, path='/path/to/new_name.ipynb')
        model = sm.get_session(session_id=session_id)
        expected = {
            'id': session_id,
            'notebook': {
                'path': u'/path/to/new_name.ipynb'
            },
            'kernel': {
                'id': u'A',
                'name': 'julia'
            }
        }
        self.assertEqual(model, expected)

    def test_bad_update_session(self):
        # try to update a session with a bad keyword ~ raise error
        sm = self.sm
        session_id = self.create_session(path='/path/to/test.ipynb',
                                         kernel_name='ir')['id']
        self.assertRaises(TypeError,
                          sm.update_session,
                          session_id=session_id,
                          bad_kw='test.ipynb')  # Bad keyword

    def test_delete_session(self):
        sm = self.sm
        sessions = self.create_sessions(
            dict(path='/path/to/1/test1.ipynb', kernel_name='python'),
            dict(path='/path/to/2/test2.ipynb', kernel_name='python'),
            dict(path='/path/to/3/test3.ipynb', kernel_name='python'),
        )
        sm.delete_session(sessions[1]['id'])
        new_sessions = sm.list_sessions()
        expected = [{
            'id': sessions[0]['id'],
            'notebook': {
                'path': u'/path/to/1/test1.ipynb'
            },
            'kernel': {
                'id': u'A',
                'name': 'python'
            }
        }, {
            'id': sessions[2]['id'],
            'notebook': {
                'path': u'/path/to/3/test3.ipynb'
            },
            'kernel': {
                'id': u'C',
                'name': 'python'
            }
        }]
        self.assertEqual(new_sessions, expected)

    def test_bad_delete_session(self):
        # try to delete a session that doesn't exist ~ raise error
        sm = self.sm
        self.create_session(path='/path/to/test.ipynb', kernel_name='python')
        self.assertRaises(TypeError, sm.delete_session,
                          bad_kwarg='23424')  # Bad keyword
        self.assertRaises(web.HTTPError, sm.delete_session,
                          session_id='23424')  # nonexistent
Example #36
File: test_tornado.py Project: zvuk/smpipi
def async_run(func):
    ioloop = IOLoop()
    ioloop.make_current()
    work = coroutine(func)
    ioloop.run_sync(work, timeout=5)
Example #37
    def _run(
        cls,
        worker_kwargs,
        worker_start_args,
        silence_logs,
        init_result_q,
        child_stop_q,
        uid,
        env,
        config,
        Worker,
    ):  # pragma: no cover
        os.environ.update(env)
        dask.config.set(config)
        try:
            from dask.multiprocessing import initialize_worker_process
        except ImportError:  # old Dask version
            pass
        else:
            initialize_worker_process()

        if silence_logs:
            logger.setLevel(silence_logs)

        IOLoop.clear_instance()
        loop = IOLoop()
        loop.make_current()
        worker = Worker(**worker_kwargs)

        async def do_stop(timeout=5, executor_wait=True):
            try:
                await worker.close(
                    report=False,
                    nanny=False,
                    executor_wait=executor_wait,
                    timeout=timeout,
                )
            finally:
                loop.stop()

        def watch_stop_q():
            """
            Wait for an incoming stop message and then stop the
            worker cleanly.
            """
            while True:
                try:
                    msg = child_stop_q.get(timeout=1000)
                except Empty:
                    pass
                else:
                    child_stop_q.close()
                    assert msg.pop("op") == "stop"
                    loop.add_callback(do_stop, **msg)
                    break

        t = threading.Thread(target=watch_stop_q,
                             name="Nanny stop queue watch")
        t.daemon = True
        t.start()

        async def run():
            """
            Try to start worker and inform parent of outcome.
            """
            try:
                await worker
            except Exception as e:
                logger.exception("Failed to start worker")
                init_result_q.put({"uid": uid, "exception": e})
                init_result_q.close()
            else:
                try:
                    assert worker.address
                except ValueError:
                    pass
                else:
                    init_result_q.put({
                        "address": worker.address,
                        "dir": worker.local_directory,
                        "uid": uid,
                    })
                    init_result_q.close()
                    await worker.finished()
                    logger.info("Worker closed")

        try:
            loop.run_sync(run)
        except (TimeoutError, gen.TimeoutError):
            # Loop was stopped before worker.finished() returned, ignore
            pass
        except KeyboardInterrupt:
            # At this point the loop is not running thus we have to run
            # do_stop() explicitly.
            loop.run_sync(do_stop)
Example #38
class TestSessionManager(TestCase):
    def setUp(self):
        self.sm = SessionManager(
            kernel_manager=DummyMKM(),
            contents_manager=ContentsManager(),
        )
        self.loop = IOLoop()
        self.addCleanup(partial(self.loop.close, all_fds=True))

    def create_sessions(self, *kwarg_list):
        @gen.coroutine
        def co_add():
            sessions = []
            for kwargs in kwarg_list:
                kwargs.setdefault('type', 'notebook')
                session = yield self.sm.create_session(**kwargs)
                sessions.append(session)
            raise gen.Return(sessions)

        return self.loop.run_sync(co_add)

    def create_session(self, **kwargs):
        return self.create_sessions(kwargs)[0]

    def test_get_session(self):
        sm = self.sm
        session_id = self.create_session(path='/path/to/test.ipynb',
                                         kernel_name='bar')['id']
        model = self.loop.run_sync(
            lambda: sm.get_session(session_id=session_id))
        expected = {
            'id': session_id,
            'path': u'/path/to/test.ipynb',
            'notebook': {
                'path': u'/path/to/test.ipynb',
                'name': None
            },
            'type': 'notebook',
            'name': None,
            'kernel': {
                'id': 'A',
                'name': 'bar',
                'connections': 0,
                'last_activity': dummy_date_s,
                'execution_state': 'idle',
            }
        }
        self.assertEqual(model, expected)

    def test_bad_get_session(self):
        # Should raise error if a bad key is passed to the database.
        sm = self.sm
        session_id = self.create_session(path='/path/to/test.ipynb',
                                         kernel_name='foo')['id']
        with self.assertRaises(TypeError):
            self.loop.run_sync(
                lambda: sm.get_session(bad_id=session_id))  # Bad keyword

    def test_get_session_dead_kernel(self):
        sm = self.sm
        session = self.create_session(path='/path/to/1/test1.ipynb',
                                      kernel_name='python')
        # kill the kernel
        sm.kernel_manager.shutdown_kernel(session['kernel']['id'])
        with self.assertRaises(KeyError):
            self.loop.run_sync(
                lambda: sm.get_session(session_id=session['id']))
        # no sessions left
        listed = self.loop.run_sync(lambda: sm.list_sessions())
        self.assertEqual(listed, [])

    def test_list_sessions(self):
        sm = self.sm
        sessions = self.create_sessions(
            dict(path='/path/to/1/test1.ipynb', kernel_name='python'),
            dict(path='/path/to/2/test2.py', type='file',
                 kernel_name='python'),
            dict(path='/path/to/3',
                 name='foo',
                 type='console',
                 kernel_name='python'),
        )

        sessions = self.loop.run_sync(lambda: sm.list_sessions())
        expected = [{
            'id': sessions[0]['id'],
            'path': u'/path/to/1/test1.ipynb',
            'type': 'notebook',
            'notebook': {
                'path': u'/path/to/1/test1.ipynb',
                'name': None
            },
            'name': None,
            'kernel': {
                'id': 'A',
                'name': 'python',
                'connections': 0,
                'last_activity': dummy_date_s,
                'execution_state': 'idle',
            }
        }, {
            'id': sessions[1]['id'],
            'path': u'/path/to/2/test2.py',
            'type': 'file',
            'name': None,
            'kernel': {
                'id': 'B',
                'name': 'python',
                'connections': 0,
                'last_activity': dummy_date_s,
                'execution_state': 'idle',
            }
        }, {
            'id': sessions[2]['id'],
            'path': u'/path/to/3',
            'type': 'console',
            'name': 'foo',
            'kernel': {
                'id': 'C',
                'name': 'python',
                'connections': 0,
                'last_activity': dummy_date_s,
                'execution_state': 'idle',
            }
        }]
        self.assertEqual(sessions, expected)

    def test_list_sessions_dead_kernel(self):
        sm = self.sm
        sessions = self.create_sessions(
            dict(path='/path/to/1/test1.ipynb', kernel_name='python'),
            dict(path='/path/to/2/test2.ipynb', kernel_name='python'),
        )
        # kill one of the kernels
        sm.kernel_manager.shutdown_kernel(sessions[0]['kernel']['id'])
        listed = self.loop.run_sync(lambda: sm.list_sessions())
        expected = [{
            'id': sessions[1]['id'],
            'path': u'/path/to/2/test2.ipynb',
            'type': 'notebook',
            'name': None,
            'notebook': {
                'path': u'/path/to/2/test2.ipynb',
                'name': None
            },
            'kernel': {
                'id': 'B',
                'name': 'python',
                'connections': 0,
                'last_activity': dummy_date_s,
                'execution_state': 'idle',
            }
        }]
        self.assertEqual(listed, expected)

    def test_update_session(self):
        sm = self.sm
        session_id = self.create_session(path='/path/to/test.ipynb',
                                         kernel_name='julia')['id']
        self.loop.run_sync(lambda: sm.update_session(
            session_id, path='/path/to/new_name.ipynb'))
        model = self.loop.run_sync(
            lambda: sm.get_session(session_id=session_id))
        expected = {
            'id': session_id,
            'path': u'/path/to/new_name.ipynb',
            'type': 'notebook',
            'name': None,
            'notebook': {
                'path': u'/path/to/new_name.ipynb',
                'name': None
            },
            'kernel': {
                'id': 'A',
                'name': 'julia',
                'connections': 0,
                'last_activity': dummy_date_s,
                'execution_state': 'idle',
            }
        }
        self.assertEqual(model, expected)

    def test_bad_update_session(self):
        # try to update a session with a bad keyword ~ raise error
        sm = self.sm
        session_id = self.create_session(path='/path/to/test.ipynb',
                                         kernel_name='ir')['id']
        with self.assertRaises(TypeError):
            self.loop.run_sync(lambda: sm.update_session(
                session_id=session_id, bad_kw='test.ipynb'))  # Bad keyword

    def test_delete_session(self):
        sm = self.sm
        sessions = self.create_sessions(
            dict(path='/path/to/1/test1.ipynb', kernel_name='python'),
            dict(path='/path/to/2/test2.ipynb', kernel_name='python'),
            dict(path='/path/to/3',
                 name='foo',
                 type='console',
                 kernel_name='python'),
        )
        self.loop.run_sync(lambda: sm.delete_session(sessions[1]['id']))
        new_sessions = self.loop.run_sync(lambda: sm.list_sessions())
        expected = [{
            'id': sessions[0]['id'],
            'path': u'/path/to/1/test1.ipynb',
            'type': 'notebook',
            'name': None,
            'notebook': {
                'path': u'/path/to/1/test1.ipynb',
                'name': None
            },
            'kernel': {
                'id': 'A',
                'name': 'python',
                'connections': 0,
                'last_activity': dummy_date_s,
                'execution_state': 'idle',
            }
        }, {
            'id': sessions[2]['id'],
            'type': 'console',
            'path': u'/path/to/3',
            'name': 'foo',
            'kernel': {
                'id': 'C',
                'name': 'python',
                'connections': 0,
                'last_activity': dummy_date_s,
                'execution_state': 'idle',
            }
        }]
        self.assertEqual(new_sessions, expected)

    def test_bad_delete_session(self):
        # try to delete a session that doesn't exist ~ raise error
        sm = self.sm
        self.create_session(path='/path/to/test.ipynb', kernel_name='python')
        with self.assertRaises(TypeError):
            self.loop.run_sync(
                lambda: sm.delete_session(bad_kwarg='23424'))  # Bad keyword
        with self.assertRaises(web.HTTPError):
            self.loop.run_sync(
                lambda: sm.delete_session(session_id='23424'))  # nonexistent
Example #39
def main(scheduler, host, worker_port, http_port, nanny_port, nthreads, nprocs,
         nanny, name, memory_limit, pid_file, temp_filename, reconnect,
         resources, bokeh, bokeh_port, local_directory):
    if nanny:
        port = nanny_port
    else:
        port = worker_port

    if nprocs > 1 and worker_port != 0:
        logger.error("Failed to launch worker.  You cannot use the --port argument when nprocs > 1.")
        exit(1)

    if nprocs > 1 and name:
        logger.error("Failed to launch worker.  You cannot use the --name argument when nprocs > 1.")
        exit(1)

    if not nthreads:
        nthreads = _ncores // nprocs

    if pid_file:
        with open(pid_file, 'w') as f:
            f.write(str(os.getpid()))

        def del_pid_file():
            if os.path.exists(pid_file):
                os.remove(pid_file)
        atexit.register(del_pid_file)

    services = {('http', http_port): HTTPWorker}

    if bokeh:
        try:
            from distributed.bokeh.worker import BokehWorker
        except ImportError:
            pass
        else:
            services[('bokeh', bokeh_port)] = BokehWorker

    if resources:
        resources = resources.replace(',', ' ').split()
        resources = dict(pair.split('=') for pair in resources)
        resources = valmap(float, resources)
    else:
        resources = None

    loop = IOLoop.current()

    if nanny:
        kwargs = {'worker_port': worker_port}
        t = Nanny
    else:
        kwargs = {}
        if nanny_port:
            kwargs['service_ports'] = {'nanny': nanny_port}
        t = Worker

    nannies = [t(scheduler, ncores=nthreads,
                 services=services, name=name, loop=loop, resources=resources,
                 memory_limit=memory_limit, reconnect=reconnect,
                 local_dir=local_directory, **kwargs)
               for i in range(nprocs)]

    for n in nannies:
        if host:
            n.start((host, port))
        else:
            n.start(port)
        if t is Nanny:
            global_nannies.append(n)

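    # Once the first worker is running, optionally publish its port and local
    # directory as JSON so external tooling can discover it.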
    if temp_filename:
        @gen.coroutine
        def f():
            while nannies[0].status != 'running':
                yield gen.sleep(0.01)
            import json
            msg = {'port': nannies[0].port,
                   'local_directory': nannies[0].local_dir}
            with open(temp_filename, 'w') as f:
                json.dump(msg, f)
        loop.add_callback(f)

    @gen.coroutine
    def run():
        while all(n.status != 'closed' for n in nannies):
            yield gen.sleep(0.2)

    try:
        loop.run_sync(run)
    except (KeyboardInterrupt, TimeoutError):
        pass
    finally:
        logger.info("End worker")
        loop.close()

    # Clean exit: unregister all workers from scheduler

    loop2 = IOLoop()

    @gen.coroutine
    def f():
        scheduler = rpc(nannies[0].scheduler.address)
        if nanny:
            yield gen.with_timeout(timedelta(seconds=2),
                    All([scheduler.unregister(address=n.worker_address, close=True)
                         for n in nannies if n.process and n.worker_address]),
                    io_loop=loop2)

    loop2.run_sync(f)
    loop2.close()

    if nanny:
        for n in nannies:
            if isalive(n.process):
                n.process.terminate()

    if nanny:
        start = time()
        while (any(isalive(n.process) for n in nannies)
                and time() < start + 1):
            sleep(0.1)

    for n in nannies:
        n.stop()
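
As an aside, the --resources parsing above is easy to sanity-check in isolation
(valmap is the toolz helper this module already uses):

from toolz import valmap

spec = "GPU=2,MEM=10e9"
pairs = spec.replace(',', ' ').split()  # accept commas or spaces as separators
resources = valmap(float, dict(pair.split('=') for pair in pairs))
assert resources == {'GPU': 2.0, 'MEM': 10e9}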
Example #40
class SyncHTTPClientTest(unittest.TestCase):
    def setUp(self):
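        # Run the server on a private IOLoop in a background thread so the
        # blocking, synchronous HTTPClient can be exercised from this thread.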
        self.server_ioloop = IOLoop()
        event = threading.Event()

        @gen.coroutine
        def init_server():
            sock, self.port = bind_unused_port()
            app = Application([("/", HelloWorldHandler)])
            self.server = HTTPServer(app)
            self.server.add_socket(sock)
            event.set()

        def start():
            self.server_ioloop.run_sync(init_server)
            self.server_ioloop.start()

        self.server_thread = threading.Thread(target=start)
        self.server_thread.start()
        event.wait()

        self.http_client = HTTPClient()

    def tearDown(self):
        def stop_server():
            self.server.stop()
            # Delay the shutdown of the IOLoop by several iterations because
            # the server may still have some cleanup work left when
            # the client finishes with the response (this is noticeable
            # with http/2, which leaves a Future with an unexamined
            # StreamClosedError on the loop).

            @gen.coroutine
            def slow_stop():
                yield self.server.close_all_connections()
                # The number of iterations is difficult to predict. Typically,
                # one is sufficient, although sometimes it needs more.
                for i in range(5):
                    yield
                self.server_ioloop.stop()

            self.server_ioloop.add_callback(slow_stop)

        self.server_ioloop.add_callback(stop_server)
        self.server_thread.join()
        self.http_client.close()
        self.server_ioloop.close(all_fds=True)

    def get_url(self, path):
        return "http://127.0.0.1:%d%s" % (self.port, path)

    def test_sync_client(self):
        response = self.http_client.fetch(self.get_url("/"))
        self.assertEqual(b"Hello world!", response.body)

    def test_sync_client_error(self):
        # Synchronous HTTPClient raises errors directly; no need for
        # response.rethrow()
        with self.assertRaises(HTTPError) as assertion:
            self.http_client.fetch(self.get_url("/notfound"))
        self.assertEqual(assertion.exception.code, 404)
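
HelloWorldHandler is not shown in this snippet; a minimal stand-in consistent
with the body asserted in test_sync_client would be:

from tornado.web import RequestHandler

class HelloWorldHandler(RequestHandler):
    def get(self):
        # Must match the b"Hello world!" assertion above.
        self.write("Hello world!")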
Example #41
class TestFutureSocket(BaseZMQTestCase):
    Context = future.Context

    def setUp(self):
        self.loop = IOLoop()
        self.loop.make_current()
        super(TestFutureSocket, self).setUp()

    def tearDown(self):
        super(TestFutureSocket, self).tearDown()
        if self.loop:
            self.loop.close(all_fds=True)
        IOLoop.clear_current()
        IOLoop.clear_instance()

    def test_socket_class(self):
        s = self.context.socket(zmq.PUSH)
        assert isinstance(s, future.Socket)
        s.close()

    def test_instance_subclass_first(self):
        actx = self.Context.instance()
        ctx = zmq.Context.instance()
        ctx.term()
        actx.term()
        assert type(ctx) is zmq.Context
        assert type(actx) is self.Context

    def test_instance_subclass_second(self):
        ctx = zmq.Context.instance()
        actx = self.Context.instance()
        ctx.term()
        actx.term()
        assert type(ctx) is zmq.Context
        assert type(actx) is self.Context

    def test_recv_multipart(self):
        async def test():
            a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
            f = b.recv_multipart()
            assert not f.done()
            await a.send(b"hi")
            recvd = await f
            self.assertEqual(recvd, [b'hi'])

        self.loop.run_sync(test)

    def test_recv(self):
        async def test():
            a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
            f1 = b.recv()
            f2 = b.recv()
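            # Both futures are pending; the two frames of the multipart send
            # below resolve them in FIFO order.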
            assert not f1.done()
            assert not f2.done()
            await a.send_multipart([b"hi", b"there"])
            recvd = await f2
            assert f1.done()
            self.assertEqual(f1.result(), b'hi')
            self.assertEqual(recvd, b'there')

        self.loop.run_sync(test)

    def test_recv_cancel(self):
        async def test():
            a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
            f1 = b.recv()
            f2 = b.recv_multipart()
            assert f1.cancel()
            assert f1.done()
            assert not f2.done()
            await a.send_multipart([b"hi", b"there"])
            recvd = await f2
            assert f1.cancelled()
            assert f2.done()
            self.assertEqual(recvd, [b'hi', b'there'])

        self.loop.run_sync(test)

    @pytest.mark.skipif(not hasattr(zmq, 'RCVTIMEO'),
                        reason="requires RCVTIMEO")
    def test_recv_timeout(self):
        async def test():
            a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
            b.rcvtimeo = 100
            f1 = b.recv()
            b.rcvtimeo = 1000
            f2 = b.recv_multipart()
            with pytest.raises(zmq.Again):
                await f1
            await a.send_multipart([b"hi", b"there"])
            recvd = await f2
            assert f2.done()
            self.assertEqual(recvd, [b'hi', b'there'])

        self.loop.run_sync(test)

    @pytest.mark.skipif(not hasattr(zmq, 'SNDTIMEO'),
                        reason="requires SNDTIMEO")
    def test_send_timeout(self):
        async def test():
            s = self.socket(zmq.PUSH)
            s.sndtimeo = 100
            with pytest.raises(zmq.Again):
                await s.send(b"not going anywhere")

        self.loop.run_sync(test)

    def test_send_noblock(self):
        async def test():
            s = self.socket(zmq.PUSH)
            with pytest.raises(zmq.Again):
                await s.send(b"not going anywhere", flags=zmq.NOBLOCK)

        self.loop.run_sync(test)

    def test_send_multipart_noblock(self):
        async def test():
            s = self.socket(zmq.PUSH)
            with pytest.raises(zmq.Again):
                await s.send_multipart([b"not going anywhere"],
                                       flags=zmq.NOBLOCK)

        self.loop.run_sync(test)

    def test_recv_string(self):
        async def test():
            a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
            f = b.recv_string()
            assert not f.done()
            msg = u('πøøπ')
            await a.send_string(msg)
            recvd = await f
            assert f.done()
            self.assertEqual(f.result(), msg)
            self.assertEqual(recvd, msg)

        self.loop.run_sync(test)

    def test_recv_json(self):
        async def test():
            a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
            f = b.recv_json()
            assert not f.done()
            obj = dict(a=5)
            await a.send_json(obj)
            recvd = await f
            assert f.done()
            self.assertEqual(f.result(), obj)
            self.assertEqual(recvd, obj)

        self.loop.run_sync(test)

    def test_recv_json_cancelled(self):
        async def test():
            a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
            f = b.recv_json()
            assert not f.done()
            f.cancel()
            # cycle eventloop to allow cancel events to fire
            await gen.sleep(0)
            obj = dict(a=5)
            await a.send_json(obj)
            with pytest.raises(future.CancelledError):
                recvd = await f
            assert f.done()
            # give it a chance to incorrectly consume the event
            events = await b.poll(timeout=5)
            assert events
            await gen.sleep(0)
            # make sure cancelled recv didn't eat up event
            recvd = await gen.with_timeout(timedelta(seconds=5), b.recv_json())
            assert recvd == obj

        self.loop.run_sync(test)

    def test_recv_pyobj(self):
        async def test():
            a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
            f = b.recv_pyobj()
            assert not f.done()
            obj = dict(a=5)
            await a.send_pyobj(obj)
            recvd = await f
            assert f.done()
            self.assertEqual(f.result(), obj)
            self.assertEqual(recvd, obj)

        self.loop.run_sync(test)

    def test_custom_serialize(self):
        def serialize(msg):
            frames = []
            frames.extend(msg.get('identities', []))
            content = json.dumps(msg['content']).encode('utf8')
            frames.append(content)
            return frames

        def deserialize(frames):
            identities = frames[:-1]
            content = json.loads(frames[-1].decode('utf8'))
            return {
                'identities': identities,
                'content': content,
            }

        async def test():
            a, b = self.create_bound_pair(zmq.DEALER, zmq.ROUTER)

            msg = {
                'content': {
                    'a': 5,
                    'b': 'bee',
                }
            }
            await a.send_serialized(msg, serialize)
            recvd = await b.recv_serialized(deserialize)
            assert recvd['content'] == msg['content']
            assert recvd['identities']
            # bounce back, tests identities
            await b.send_serialized(recvd, serialize)
            r2 = await a.recv_serialized(deserialize)
            assert r2['content'] == msg['content']
            assert not r2['identities']

        self.loop.run_sync(test)

    def test_custom_serialize_error(self):
        async def test():
            a, b = self.create_bound_pair(zmq.DEALER, zmq.ROUTER)

            msg = {
                'content': {
                    'a': 5,
                    'b': 'bee',
                }
            }
            with pytest.raises(TypeError):
                await a.send_serialized(json, json.dumps)

            await a.send(b"not json")
            with pytest.raises(TypeError):
                recvd = await b.recv_serialized(json.loads)

        self.loop.run_sync(test)

    def test_poll(self):
        async def test():
            a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
            f = b.poll(timeout=0)
            assert f.done()
            self.assertEqual(f.result(), 0)

            f = b.poll(timeout=1)
            assert not f.done()
            evt = await f
            self.assertEqual(evt, 0)

            f = b.poll(timeout=1000)
            assert not f.done()
            await a.send_multipart([b"hi", b"there"])
            evt = await f
            self.assertEqual(evt, zmq.POLLIN)
            recvd = await b.recv_multipart()
            self.assertEqual(recvd, [b'hi', b'there'])

        self.loop.run_sync(test)

    @pytest.mark.skipif(sys.platform.startswith('win'),
                        reason='Windows unsupported socket type')
    def test_poll_base_socket(self):
        async def test():
            ctx = zmq.Context()
            url = 'inproc://test'
            a = ctx.socket(zmq.PUSH)
            b = ctx.socket(zmq.PULL)
            self.sockets.extend([a, b])
            a.bind(url)
            b.connect(url)

            poller = future.Poller()
            poller.register(b, zmq.POLLIN)

            f = poller.poll(timeout=1000)
            assert not f.done()
            a.send_multipart([b'hi', b'there'])
            evt = await f
            self.assertEqual(evt, [(b, zmq.POLLIN)])
            recvd = b.recv_multipart()
            self.assertEqual(recvd, [b'hi', b'there'])
            a.close()
            b.close()
            ctx.term()

        self.loop.run_sync(test)

    def test_close_all_fds(self):
        s = self.socket(zmq.PUB)
        s._get_loop()
        self.loop.close(all_fds=True)
        self.loop = None  # avoid second close later
        assert s.closed

    @pytest.mark.skipif(
        sys.platform.startswith('win'),
        reason='Windows does not support polling on files',
    )
    def test_poll_raw(self):
        async def test():
            p = future.Poller()
            # make a pipe
            r, w = os.pipe()
            r = os.fdopen(r, 'rb')
            w = os.fdopen(w, 'wb')

            # POLLOUT
            p.register(r, zmq.POLLIN)
            p.register(w, zmq.POLLOUT)
            evts = await p.poll(timeout=1)
            evts = dict(evts)
            assert r.fileno() not in evts
            assert w.fileno() in evts
            assert evts[w.fileno()] == zmq.POLLOUT

            # POLLIN
            p.unregister(w)
            w.write(b'x')
            w.flush()
            evts = await p.poll(timeout=1000)
            evts = dict(evts)
            assert r.fileno() in evts
            assert evts[r.fileno()] == zmq.POLLIN
            assert r.read(1) == b'x'
            r.close()
            w.close()

        self.loop.run_sync(test)
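
Outside a test harness, the same future-returning socket API can be driven with
run_sync directly; a minimal sketch over an inproc transport:

import zmq
from tornado.ioloop import IOLoop
from zmq.eventloop import future

async def roundtrip():
    ctx = future.Context.instance()
    a = ctx.socket(zmq.PUSH)
    b = ctx.socket(zmq.PULL)
    a.bind('inproc://demo')
    b.connect('inproc://demo')
    await a.send(b'ping')
    assert (await b.recv()) == b'ping'
    a.close()
    b.close()
    ctx.term()

IOLoop.current().run_sync(roundtrip)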
Example #42
def main(scheduler, host, worker_port, http_port, nanny_port, nthreads, nprocs,
         nanny, name, memory_limit, pid_file, reconnect, resources, bokeh,
         bokeh_port, local_directory, scheduler_file, interface, death_timeout,
         preload, bokeh_prefix, tls_ca_file, tls_cert, tls_key):
    sec = Security(
        tls_ca_file=tls_ca_file,
        tls_worker_cert=tls_cert,
        tls_worker_key=tls_key,
    )

    if nanny:
        port = nanny_port
    else:
        port = worker_port

    if nprocs > 1 and worker_port != 0:
        logger.error(
            "Failed to launch worker.  You cannot use the --port argument when nprocs > 1."
        )
        exit(1)

    if nprocs > 1 and name:
        logger.error(
            "Failed to launch worker.  You cannot use the --name argument when nprocs > 1."
        )
        exit(1)

    if not nthreads:
        nthreads = _ncores // nprocs

    if pid_file:
        with open(pid_file, 'w') as f:
            f.write(str(os.getpid()))

        def del_pid_file():
            if os.path.exists(pid_file):
                os.remove(pid_file)

        atexit.register(del_pid_file)

    services = {('http', http_port): HTTPWorker}

    if bokeh:
        try:
            from distributed.bokeh.worker import BokehWorker
        except ImportError:
            pass
        else:
            if bokeh_prefix:
                result = (BokehWorker, {'prefix': bokeh_prefix})
            else:
                result = BokehWorker
            services[('bokeh', bokeh_port)] = result

    if resources:
        resources = resources.replace(',', ' ').split()
        resources = dict(pair.split('=') for pair in resources)
        resources = valmap(float, resources)
    else:
        resources = None

    loop = IOLoop.current()

    if nanny:
        kwargs = {'worker_port': worker_port}
        t = Nanny
    else:
        kwargs = {}
        if nanny_port:
            kwargs['service_ports'] = {'nanny': nanny_port}
        t = Worker

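    # Discover the scheduler address from a shared JSON file, retrying briefly
    # in case the scheduler is still writing it.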
    if scheduler_file:
        while not os.path.exists(scheduler_file):
            sleep(0.01)
        for i in range(10):
            try:
                with open(scheduler_file) as f:
                    cfg = json.load(f)
                scheduler = cfg['address']
                break
            except (ValueError, KeyError):  # race with scheduler on file
                sleep(0.01)

    if not scheduler:
        raise ValueError("Need to provide scheduler address like\n"
                         "dask-worker SCHEDULER_ADDRESS:8786")

    nannies = [
        t(scheduler,
          ncores=nthreads,
          services=services,
          name=name,
          loop=loop,
          resources=resources,
          memory_limit=memory_limit,
          reconnect=reconnect,
          local_dir=local_directory,
          death_timeout=death_timeout,
          preload=preload,
          security=sec,
          **kwargs) for i in range(nprocs)
    ]

    if interface:
        if host:
            raise ValueError("Can not specify both interface and host")
        else:
            host = get_ip_interface(interface)

    if host or port:
        addr = uri_from_host_port(host, port, 0)
    else:
        # Choose appropriate address for scheduler
        addr = None

    for n in nannies:
        n.start(addr)
        if t is Nanny:
            global_nannies.append(n)

    @gen.coroutine
    def run():
        while all(n.status != 'closed' for n in nannies):
            yield gen.sleep(0.2)

    try:
        loop.run_sync(run)
    except (KeyboardInterrupt, TimeoutError):
        pass
    finally:
        logger.info("End worker")
        loop.close()

    # Clean exit: unregister all workers from scheduler

    loop2 = IOLoop()

    @gen.coroutine
    def f():
        if nanny:
            w = nannies[0]
            with w.rpc(w.scheduler.address) as scheduler:
                yield gen.with_timeout(timeout=timedelta(seconds=2),
                                       future=All([
                                           scheduler.unregister(
                                               address=n.worker_address,
                                               close=True) for n in nannies
                                           if n.process and n.worker_address
                                       ]),
                                       io_loop=loop2)

    loop2.run_sync(f)
    loop2.close()

    if nanny:
        for n in nannies:
            if isalive(n.process):
                n.process.terminate()

    if nanny:
        start = time()
        while (any(isalive(n.process) for n in nannies)
               and time() < start + 1):
            sleep(0.1)

    for n in nannies:
        n.stop()
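
The scheduler_file handshake above assumes only that the scheduler writes a
JSON document containing an 'address' key; a sketch of the producing side,
with a hypothetical path:

import json

with open('/tmp/scheduler.json', 'w') as f:
    json.dump({'address': 'tcp://127.0.0.1:8786'}, f)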