Code example #1
import threading
from time import sleep, time

from distributed.profile import watch


def test_watch():
    start = time()

    def stop():
        # Stop the profiler once 0.5 s have passed since `start` was last reset.
        return time() > start + 0.500

    start_threads = threading.active_count()

    log = watch(interval="10ms", cycle="50ms", stop=stop)

    start = time()  # wait until the profiler thread starts up
    while threading.active_count() <= start_threads:
        assert time() < start + 2
        sleep(0.01)

    sleep(0.5)
    assert 1 < len(log) < 10  # roughly one log entry per 50 ms cycle

    start = time()  # wait until the profiler thread exits
    while threading.active_count() > start_threads:
        assert time() < start + 2
        sleep(0.01)
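
For context, here is a minimal usage sketch of distributed.profile.watch outside the test harness. The interval/cycle/stop call pattern comes from the example above; the lambda stop condition and the 1.5 s wait are illustrative choices, and the idea that the returned log gains roughly one entry per cycle while the thread runs is inferred from the assertion 1 < len(log) < 10 rather than stated on this page.

from time import sleep, time

from distributed.profile import watch

start = time()
# Take stack samples every 10 ms and append an aggregated snapshot to `log`
# every 50 ms, until stop() reports that one second has elapsed.
log = watch(interval="10ms", cycle="50ms", stop=lambda: time() > start + 1)

sleep(1.5)        # give the background thread time to sample and shut down
print(len(log))   # roughly one entry per 50 ms cycle while it was running

As in the tests, shutdown is cooperative: the background thread polls stop() and exits on its own, which is why the tests wait for threading.active_count() to drop back down instead of joining a thread handle.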
Code example #2
File: test_profile.py  Project: tomMoral/distributed
import threading
import time

from distributed import metrics
from distributed.profile import watch


def test_watch():
    start = metrics.time()

    def stop():
        return metrics.time() > start + 0.500

    start_threads = threading.active_count()

    log = watch(interval='10ms', cycle='50ms', stop=stop)

    start = metrics.time()  # wait until thread starts up
    while threading.active_count() <= start_threads:
        assert metrics.time() < start + 2
        time.sleep(0.01)

    time.sleep(0.5)
    assert 1 < len(log) < 10

    start = metrics.time()
    while threading.active_count() > start_threads:
        assert metrics.time() < start + 2
        time.sleep(0.01)
Code example #3
File: core.py  Project: haraldschilly/distributed
    def __init__(
        self,
        handlers,
        blocked_handlers=None,
        stream_handlers=None,
        connection_limit=512,
        deserialize=True,
        serializers=None,
        deserializers=None,
        connection_args=None,
        timeout=None,
        io_loop=None,
    ):
        self.handlers = {
            "identity": self.identity,
            "echo": self.echo,
            "connection_stream": self.handle_stream,
            "dump_state": self._to_dict,
        }
        self.handlers.update(handlers)
        if blocked_handlers is None:
            blocked_handlers = dask.config.get(
                "distributed.%s.blocked-handlers" % type(self).__name__.lower(), []
            )
        self.blocked_handlers = blocked_handlers
        self.stream_handlers = {}
        self.stream_handlers.update(stream_handlers or {})

        self.id = type(self).__name__ + "-" + str(uuid.uuid4())
        self._address = None
        self._listen_address = None
        self._port = None
        self._comms = {}
        self.deserialize = deserialize
        self.monitor = SystemMonitor()
        self.counters = None
        self.digests = None
        self._ongoing_coroutines = weakref.WeakSet()
        self._event_finished = asyncio.Event()

        self.listeners = []
        self.io_loop = io_loop or IOLoop.current()
        self.loop = self.io_loop

        # Attach a sampling profiler to this event loop only once; the
        # weakref-based stop() lets the profiler thread exit after the loop
        # has been garbage-collected or closed.
        if not hasattr(self.io_loop, "profile"):
            ref = weakref.ref(self.io_loop)

            def stop():
                loop = ref()
                return loop is None or loop.asyncio_loop.is_closed()

            self.io_loop.profile = profile.watch(
                omit=("profile.py", "selectors.py"),
                interval=dask.config.get("distributed.worker.profile.interval"),
                cycle=dask.config.get("distributed.worker.profile.cycle"),
                stop=stop,
            )

        # Statistics counters for various events
        with suppress(ImportError):
            from distributed.counter import Digest

            self.digests = defaultdict(partial(Digest, loop=self.io_loop))

        from distributed.counter import Counter

        self.counters = defaultdict(partial(Counter, loop=self.io_loop))

        self.periodic_callbacks = dict()

        pc = PeriodicCallback(
            self.monitor.update,
            parse_timedelta(
                dask.config.get("distributed.admin.system-monitor.interval")
            )
            * 1000,
        )
        self.periodic_callbacks["monitor"] = pc

        # Event-loop "tick" bookkeeping: _measure_tick periodically records how
        # far apart callbacks actually fire, which reveals a blocked or
        # overloaded event loop.
        self._last_tick = time()
        self._tick_counter = 0
        self._tick_count = 0
        self._tick_count_last = time()
        self._tick_interval = parse_timedelta(
            dask.config.get("distributed.admin.tick.interval"), default="ms"
        )
        self._tick_interval_observed = self._tick_interval
        self.periodic_callbacks["tick"] = PeriodicCallback(
            self._measure_tick, self._tick_interval * 1000
        )
        self.periodic_callbacks["ticks"] = PeriodicCallback(
            self._cycle_ticks,
            parse_timedelta(dask.config.get("distributed.admin.tick.cycle")) * 1000,
        )

        self.thread_id = 0

        def set_thread_ident():
            self.thread_id = threading.get_ident()

        self.io_loop.add_callback(set_thread_ident)
        self._startup_lock = asyncio.Lock()
        self.status = Status.undefined

        self.rpc = ConnectionPool(
            limit=connection_limit,
            deserialize=deserialize,
            serializers=serializers,
            deserializers=deserializers,
            connection_args=connection_args,
            timeout=timeout,
            server=self,
        )

        self.__stopped = False
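
The hasattr guard on io_loop in the constructor above encapsulates a small reusable pattern: attach a sampling profiler to an event loop exactly once, and let the sampler stop itself when the loop disappears. Below is a standalone sketch of that pattern, not taken from core.py: attach_profiler is a hypothetical helper, a plain asyncio loop stands in for the Tornado IOLoop, and the literal interval/cycle strings replace the dask.config lookups used by the real code.

import asyncio
import weakref

from distributed import profile


def attach_profiler(loop: asyncio.AbstractEventLoop):
    # Hold only a weak reference so the profiler does not keep the loop alive.
    ref = weakref.ref(loop)

    def stop():
        # Tell the sampling thread to exit once the loop has been
        # garbage-collected or explicitly closed.
        target = ref()
        return target is None or target.is_closed()

    # Returns the deque that the background sampling thread appends to.
    return profile.watch(
        omit=("profile.py", "selectors.py"),
        interval="10ms",  # hypothetical value; core.py reads it from dask.config
        cycle="1s",       # hypothetical value; core.py reads it from dask.config
        stop=stop,
    )

Holding only a weak reference is the key design choice here: the profiler thread must not keep the loop (or the Server that owns it) alive after everything else has released it.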