Example #1 (0 votes)
class LocalScheduler:
    """Run a dask Scheduler and a single Worker on a local event loop.

    Wraps scheduler/worker startup on the loop carried by *dloop* and
    records the scheduler's address so an Executor can connect later.
    """

    def __init__(self, dloop):
        """
        Create a local scheduler and worker to run commands.

        Parameters
        ----------
        dloop : object
            Holder exposing a Tornado-style event loop as ``.loop``.
        """
        self.dloop = dloop
        self.loop = dloop.loop

        msg.logMessage("Starting scheduler", msg.INFO)
        msg.logMessage(self.dloop, msg.DEBUG)
        msg.logMessage(self.dloop.loop, msg.DEBUG)

        self.s = Scheduler("localhost", loop=self.dloop.loop)
        self.s.start(0)  # port 0: let the OS pick a free port
        msg.logMessage("End scheduler", msg.INFO)

        msg.logMessage("Starting worker", msg.INFO)
        self.w = Worker(self.s.ip, self.s.port, ncores=1, loop=self.loop)
        self.w.start(0)

        # Remember where the scheduler listens so execute() can connect.
        self.addr = self.s.ip
        self.port = self.s.port

    def execute(self):
        """Create an Executor (client) connected to the local scheduler."""
        msg.logMessage("Starting executor", msg.INFO)
        self.executor = Executor("{0}:{1}".format(self.addr, self.port))
        msg.logMessage("executor..", msg.DEBUG)

    def close(self):
        """Shut down the scheduler.

        NOTE(review): the worker started in __init__ is not stopped here;
        confirm whether ``self.w`` also needs an explicit shutdown.
        """
        self.s.close()
Example #2 (0 votes)
    def f(c, a, b):
        """Submit a failing task and verify the progress bar reports it."""
        s = Scheduler((c.ip, c.port), loop=loop)
        s.listen(0)
        yield s.sync_center()
        done = s.start()

        s.update_graph(dsk={'x': (div, 1, 0)}, keys=['x'])

        # 'x' divides by zero, so every bar should end in 'error' and
        # close its stream.  Exercise this twice in a row.
        for _ in range(2):
            progress = TextProgressBar(['x'],
                                       scheduler=(s.ip, s.port),
                                       start=False,
                                       interval=0.01)
            yield progress.listen()
            assert progress.status == 'error'
            assert progress.stream.closed()

        s.close()
        yield done
def test_services_with_port():
    """A service registered with an explicit port binds exactly there."""
    scheduler = Scheduler(services={('http', 9999): HTTPScheduler})
    scheduler.start()
    try:
        http_service = scheduler.services['http']
        assert isinstance(http_service, HTTPServer)
        assert http_service.port == 9999
    finally:
        scheduler.close()
def test_services():
    """A service registered without a port gets some free port assigned."""
    scheduler = Scheduler(services={'http': HTTPScheduler})
    scheduler.start()
    try:
        http_service = scheduler.services['http']
        assert isinstance(http_service, HTTPServer)
        assert http_service.port > 0
    finally:
        scheduler.close()
    def f(c, a, b):
        """With no keys to track, the progress bar finishes immediately."""
        s = Scheduler((c.ip, c.port), loop=loop)
        yield s.sync_center()
        done = s.start(0)

        bar = TextProgressBar([], scheduler=(s.ip, s.port), start=False,
                              interval=0.01)
        yield bar.listen()

        assert bar.status == 'finished'
        check_bar_completed(capsys)

        s.close()
        yield done
Example #6 (0 votes)
def start_cluster(ncores, scheduler_addr, loop, security=None,
                  Worker=Worker, scheduler_kwargs=None, worker_kwargs=None):
    """Start a validating Scheduler plus one Worker per *ncores* entry.

    Parameters
    ----------
    ncores : list of tuples ``(address, nthreads[, extra_worker_kwargs])``
    scheduler_addr : address for the scheduler to listen on
    loop : event loop shared by the scheduler and all workers
    security : optional security configuration passed to every node
    Worker : worker class to instantiate
    scheduler_kwargs, worker_kwargs : dict, optional
        Extra keyword arguments for the Scheduler / each Worker.

    Raises
    ------
    Exception
        If the cluster is not fully connected within 5 seconds.
    """
    # Use None (not {}) as defaults: a mutable default dict would be
    # shared across calls.
    if scheduler_kwargs is None:
        scheduler_kwargs = {}
    if worker_kwargs is None:
        worker_kwargs = {}

    s = Scheduler(loop=loop, validate=True, security=security,
                  **scheduler_kwargs)
    done = s.start(scheduler_addr)
    workers = [Worker(s.address, ncores=ncore[1], name=i, security=security,
                      loop=loop, validate=True,
                      **(merge(worker_kwargs, ncore[2])
                         if len(ncore) > 2
                         else worker_kwargs))
               for i, ncore in enumerate(ncores)]
    # Share one rpc pool among all workers.
    for w in workers:
        w.rpc = workers[0].rpc

    yield [w._start(ncore[0]) for ncore, w in zip(ncores, workers)]

    start = time()
    # Wait until every worker has registered and its comm stream is live.
    while (len(s.workers) < len(ncores) or
           any(comm.comm is None for comm in s.stream_comms.values())):
        yield gen.sleep(0.01)
        if time() - start > 5:
            # Timed out: tear everything down before failing.
            yield [w._close(timeout=1) for w in workers]
            yield s.close(fast=True)
            raise Exception("Cluster creation timeout")
    raise gen.Return((s, workers))
Example #7 (0 votes)
    def g():
        """Spin up a scheduler plus two workers, run f, then tear down."""
        s = Scheduler(ip='127.0.0.1')
        done = s.start()
        s.listen(0)
        a = Worker('127.0.0.1', s.port, ncores=2, ip='127.0.0.1')
        yield a._start()
        b = Worker('127.0.0.1', s.port, ncores=1, ip=b_ip)
        yield b._start()

        deadline = time() + 5
        try:
            # Block (at most 5 s) until both workers have registered.
            while len(s.ncores) < 2:
                yield gen.sleep(0.01)
                if time() > deadline:
                    raise Exception("Cluster creation timeout")

            yield f(s, a, b)
        finally:
            logger.debug("Closing out test cluster")
            for worker in (a, b):
                with ignoring(TimeoutError, StreamClosedError, OSError):
                    yield worker._close()
                if os.path.exists(worker.local_dir):
                    shutil.rmtree(worker.local_dir)
            yield s.close()
    def g():
        """Start scheduler and two workers, run f(s, a, b), always clean up."""
        s = Scheduler(ip='127.0.0.1')
        done = s.start()
        s.listen(0)
        a = Worker('127.0.0.1', s.port, ncores=2, ip='127.0.0.1')
        yield a._start()
        b = Worker('127.0.0.1', s.port, ncores=1, ip=b_ip)
        yield b._start()

        started = time()
        try:
            while len(s.ncores) < 2:
                yield gen.sleep(0.01)
                # Give the workers at most five seconds to register.
                if time() - started > 5:
                    raise Exception("Cluster creation timeout")

            yield f(s, a, b)
        finally:
            logger.debug("Closing out test cluster")
            for w in (a, b):
                with ignoring(TimeoutError, StreamClosedError, OSError):
                    yield w._close()
                # Remove any scratch directory the worker left behind.
                if os.path.exists(w.local_dir):
                    shutil.rmtree(w.local_dir)
            yield s.close()
Example #9 (0 votes)
def start_cluster(ncores, scheduler_addr, loop, security=None,
                  Worker=Worker, scheduler_kwargs=None, worker_kwargs=None):
    """Start a validating Scheduler plus one Worker per *ncores* entry.

    Parameters
    ----------
    ncores : list of tuples ``(address, nthreads[, extra_worker_kwargs])``
    scheduler_addr : address for the scheduler to listen on
    loop : event loop shared by the scheduler and all workers
    security : optional security configuration passed to every node
    Worker : worker class to instantiate
    scheduler_kwargs, worker_kwargs : dict, optional
        Extra keyword arguments for the Scheduler / each Worker.

    Raises
    ------
    Exception
        If the cluster is not fully connected within 5 seconds.
    """
    # Use None (not {}) as defaults: a mutable default dict would be
    # shared across calls.
    if scheduler_kwargs is None:
        scheduler_kwargs = {}
    if worker_kwargs is None:
        worker_kwargs = {}

    s = Scheduler(loop=loop, validate=True, security=security,
                  **scheduler_kwargs)
    done = s.start(scheduler_addr)
    workers = [Worker(s.address, ncores=ncore[1], name=i, security=security,
                      loop=loop, validate=True,
                      **(merge(worker_kwargs, ncore[2])
                         if len(ncore) > 2
                         else worker_kwargs))
               for i, ncore in enumerate(ncores)]
    # Share one rpc pool among all workers.
    for w in workers:
        w.rpc = workers[0].rpc

    yield [w._start(ncore[0]) for ncore, w in zip(ncores, workers)]

    start = time()
    # Wait until every worker has registered and its comm stream is live.
    while (len(s.workers) < len(ncores) or
           any(comm.comm is None for comm in s.stream_comms.values())):
        yield gen.sleep(0.01)
        if time() - start > 5:
            # Timed out: tear everything down before failing.
            yield [w._close(timeout=1) for w in workers]
            yield s.close(fast=True)
            raise Exception("Cluster creation timeout")
    raise gen.Return((s, workers))
Example #10 (0 votes)
    def f():
        """An empty key set should yield an immediately-finished bar."""
        s = Scheduler(loop=loop)
        done = s.start(0)
        workers = [Worker(s.ip, s.port, loop=loop, ncores=1)
                   for _ in range(2)]
        yield [w._start(0) for w in workers]

        bar = TextProgressBar([], scheduler=(s.ip, s.port), start=False,
                              interval=0.01)
        yield bar.listen()

        assert bar.status == 'finished'
        check_bar_completed(capsys)

        yield [w._close() for w in workers]
        s.close()
        yield done
    def f(c, a, b):
        """A failing task should drive the progress bar into 'error'."""
        s = Scheduler((c.ip, c.port), loop=loop)
        yield s.sync_center()
        done = s.start()

        s.update_graph(dsk={'x': (div, 1, 0)},
                       keys=['x'])

        # Division by zero makes 'x' error out; every bar should notice
        # and close its stream.  Run twice to check repeatability.
        for _ in range(2):
            bar = TextProgressBar(['x'], scheduler=(s.ip, s.port),
                                  start=False, interval=0.01)
            yield bar.listen()
            assert bar.status == 'error'
            assert bar.stream.closed()

        s.close()
        yield done
Example #12 (0 votes)
def test_tls_scheduler(security, loop):
    """A scheduler configured with TLS security listens on a tls:// address."""
    s = Scheduler(security=security, loop=loop)
    s.start("localhost")
    try:
        # try/finally (consistent with test_services*) so the scheduler
        # is released even when the assertion fails.
        assert s.address.startswith("tls")
    finally:
        s.close()
Example #13 (0 votes)
class MarathonCluster(object):
    """A dask cluster whose worker pool is managed through Marathon.

    Starts a Scheduler (on a background IOLoop thread if the given loop is
    not already running) and a MarathonWorkers pool; optionally wires up
    adaptive scaling and the Bokeh diagnostics service.
    """

    def __init__(self, loop=None, nworkers=0, ip=None, scheduler_port=0,
                 diagnostics_port=8787, services=None, adaptive=False,
                 silence_logs=logging.CRITICAL,
                 **kwargs):
        """Create and start the cluster.

        Parameters
        ----------
        loop : IOLoop, optional — created (and run on a daemon thread) if absent
        nworkers : int — initial number of workers to start
        ip : str, optional — scheduler bind address; defaults to 127.0.0.1
        scheduler_port : int — 0 means "pick a free port"
        diagnostics_port : int or None — Bokeh port; None disables diagnostics
        services : dict, optional — extra scheduler services
        adaptive : bool — enable adaptive scaling
        silence_logs : int — level applied to distributed loggers; falsy skips
        **kwargs : forwarded to MarathonWorkers
        """
        if silence_logs:
            for logger_name in ['distributed.scheduler',
                                'distributed.worker',
                                'distributed.core',
                                'distributed.nanny']:
                logging.getLogger(logger_name).setLevel(silence_logs)

        # Copy rather than mutate the caller's dict; the old mutable
        # default ({}) was shared between instances, so one instance's
        # bokeh entry leaked into the next.
        services = {} if services is None else dict(services)

        self.loop = loop or IOLoop()
        if not self.loop._running:
            # We own the loop: run it on a daemon thread and remember the
            # thread so close() knows to stop the loop as well.
            self._thread = Thread(target=self.loop.start)
            self._thread.daemon = True
            self._thread.start()
            while not self.loop._running:
                sleep(0.001)

        if diagnostics_port is not None:
            try:
                from distributed.bokeh.scheduler import BokehScheduler
            except ImportError:
                logger.info('To start diagnostics server please install Bokeh')
            else:
                services[('bokeh', diagnostics_port)] = BokehScheduler

        self.scheduler = Scheduler(loop=self.loop, services=services)
        self.workers = MarathonWorkers(self.scheduler, **kwargs)
        if adaptive:
            self.adaptive = Adaptive(self.scheduler, self.workers)

        if ip is None:
            ip = '127.0.0.1'
        self.scheduler_port = scheduler_port
        self.scheduler.start((ip, scheduler_port))
        self.workers.start(nworkers)
        self.status = 'running'

        # Use the module logger, not the root logger, for consistency.
        logger.info('Scheduler address: {}'.format(self.scheduler.address))

    def scale_up(self, nworkers):
        """Grow the worker pool to *nworkers* workers."""
        self.workers.scale_up(nworkers)

    def scale_down(self, workers):
        """Retire the given *workers* from the pool."""
        self.workers.scale_down(workers)

    def __str__(self):
        return 'MarathonCluster({}, workers={}, ncores={})'.format(
            self.scheduler.address, len(self.scheduler.workers),
            self.scheduler.total_ncores)

    __repr__ = __str__

    @gen.coroutine
    def _close(self):
        """Coroutine implementing shutdown; safe to call more than once."""
        if self.status == 'closed':
            return

        logger.info('Stopping workers...')
        self.workers.close()

        with ignoring(gen.TimeoutError, CommClosedError, OSError):
            logger.info('Stopping scheduler...')
            yield self.scheduler.close(fast=True)

        self.status = 'closed'

    def close(self):
        """ Close the cluster """
        if self.status == 'closed':
            return

        if self.loop._running:
            sync(self.loop, self._close)
        # Only stop/join the loop if __init__ started it on our own thread.
        if hasattr(self, '_thread'):
            sync(self.loop, self.loop.stop)
            self._thread.join(timeout=1)
            self.loop.close()
            del self._thread

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()

    @property
    def scheduler_address(self):
        """Scheduler address, or '<unstarted>' before it starts listening."""
        try:
            return self.scheduler.address
        except ValueError:
            return '<unstarted>'
def test_tls_scheduler(security, loop):
    """A scheduler configured with TLS security listens on a tls:// address."""
    s = Scheduler(security=security, loop=loop)
    s.start('localhost')
    try:
        # try/finally so the scheduler is released even on assertion failure.
        assert s.address.startswith('tls')
    finally:
        s.close()
Example #15 (0 votes)
def test_tls_scheduler(security, loop):
    """A scheduler configured with TLS security listens on a tls:// address."""
    s = Scheduler(security=security, loop=loop)
    s.start('localhost')
    try:
        # try/finally so the scheduler is released even on assertion failure.
        assert s.address.startswith('tls')
    finally:
        s.close()