Example #1
0
    def boot(self):
        '''
        The timer loop receives events to schedule and, when any are pending,
        adds them to the schedule. When scheduled events come up for
        execution it runs them and pushes any follow-up event back onto the
        schedule queue.
        Because of this structure the loop can spin quickly when many events
        fire at the same time, yet sleep for longer stretches when there is
        nothing to do.
        Callback execution is dispatched to a thread pool, sized in
        proportion to the number of available cores.
        '''
        LOG.debug('Revving up the scheduler!')
        scheduled_events = []
        LOG.debug('Setting loops per second to: %.2f', 1 / self._resolution)
        while True:
            begin_time = time.monotonic()
            unix_time = time.time()
            self._process_event(scheduled_events, unix_time)
            quit = self._process_queue(scheduled_events)
            if quit:
                break

            time_spent = time.monotonic() - begin_time
            # Make the loop tick on .0 unix time for style points
            sleep_time = self._resolution - math.fmod(unix_time + time_spent,
                                                      self._resolution)
            # We don't need to sleep if we have a queue or spent too much time
            if sleep_time < 0 or self._scheduleq.qsize() > 0:
                sleep_time = 0
            time.sleep(sleep_time)
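A minimal standalone sketch of the tick-alignment arithmetic used above; the `aligned_sleep` name and the `resolution` value are illustrative, not part of the original scheduler:

import math
import time

def aligned_sleep(resolution=1.0):
    # Sleep just long enough that the caller wakes up on a multiple of
    # `resolution` in unix time -- the ".0 unix time" trick from the loop above.
    sleep_time = resolution - math.fmod(time.time(), resolution)
    time.sleep(sleep_time)

aligned_sleep(1.0)
print(time.time() % 1.0)  # close to 0.0 (or to 1.0), within scheduling jitter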
Example #2
0
def test_zmq_with_thread(count):
    """zmq with threads"""
    print('.', end='', flush=True)
    ctx = zmq.Context()
    dealer = ctx.socket(zmq.DEALER)
    dealer.bind('tcp://127.0.0.1:*')
    address = dealer.getsockopt(zmq.LAST_ENDPOINT).rstrip(b'\0')
    msg = b'func', b'\0'*200

    def router_thread():
        router = ctx.socket(zmq.ROUTER)
        router.connect(address)

        for i in range(count):
            addr, m1, m2 = router.recv_multipart()
            router.send_multipart((addr, m1, m2))

        router.close()

    th = threading.Thread(target=router_thread)
    th.start()
    gc.collect()
    t1 = time.monotonic()
    for i in range(count):
        dealer.send_multipart(msg)
        dealer.recv_multipart()
    t2 = time.monotonic()
    gc.collect()
    th.join()
    dealer.close()
    ctx.destroy()
    return t2 - t1
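A hypothetical driver for the benchmark above, assuming the surrounding module already imports zmq, threading, gc and time as the snippet implies:

if __name__ == '__main__':
    count = 10_000
    elapsed = test_zmq_with_thread(count)
    # Each iteration is one DEALER -> ROUTER -> DEALER round trip.
    print('\n%d round trips in %.3fs (%.0f msg/s)' % (count, elapsed, count / elapsed))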
Example #3
0
def test_eintr(wfs, spair):
    a, b = spair
    interrupt_count = [0]

    def handler(sig, frame):
        assert sig == signal.SIGALRM
        interrupt_count[0] += 1

    old_handler = signal.signal(signal.SIGALRM, handler)
    try:
        assert not wfs(a, read=True, timeout=0)
        start = monotonic()
        try:
            # Start delivering SIGALRM 10 times per second
            signal.setitimer(signal.ITIMER_REAL, 0.1, 0.1)
            # Sleep for 1 second (we hope!)
            wfs(a, read=True, timeout=1)
        finally:
            # Stop delivering SIGALRM
            signal.setitimer(signal.ITIMER_REAL, 0)
        end = monotonic()
        dur = end - start
        assert 0.9 < dur < 3
    finally:
        signal.signal(signal.SIGALRM, old_handler)

    assert interrupt_count[0] > 0
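A small, Unix-only sketch of the setitimer pattern the test relies on: SIGALRM fires repeatedly at the given interval until the timer is cleared, and time.sleep() is transparently restarted after each interruption (PEP 475):

import signal
import time

ticks = []
signal.signal(signal.SIGALRM, lambda sig, frame: ticks.append(time.monotonic()))
signal.setitimer(signal.ITIMER_REAL, 0.1, 0.1)  # first alarm after 0.1s, then every 0.1s
time.sleep(0.55)                                # interrupted ~5 times, but still sleeps 0.55s
signal.setitimer(signal.ITIMER_REAL, 0)         # stop delivering SIGALRM
print(len(ticks), 'alarms received')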
Example #4
0
 def incoming_telemetry(self, label, telemetry: TelemetryInfo):
     self.telemetries[label] = telemetry
     self.timestamps[label] = time.monotonic()
     self.emit('telemetry', label, telemetry)
     if (time.monotonic() - self._last_overall_telemetry_emit) > self.state['overall_telemetry_interval']:
         self.emit('telemetry', None, self.get_telemetry(None))
         self._last_overall_telemetry_emit = time.monotonic()
Example #5
0
def test_eintr_infinite_timeout(wfs, spair):
    a, b = spair
    interrupt_count = [0]

    def handler(sig, frame):
        assert sig == signal.SIGALRM
        interrupt_count[0] += 1

    def make_a_readable_after_one_second():
        time.sleep(1)
        b.send(b"x")

    old_handler = signal.signal(signal.SIGALRM, handler)
    try:
        assert not wfs(a, read=True, timeout=0)
        start = monotonic()
        try:
            # Start delivering SIGALRM 10 times per second
            signal.setitimer(signal.ITIMER_REAL, 0.1, 0.1)
            # Sleep for 1 second (we hope!)
            thread = threading.Thread(target=make_a_readable_after_one_second)
            thread.start()
            wfs(a, read=True)
        finally:
            # Stop delivering SIGALRM
            signal.setitimer(signal.ITIMER_REAL, 0)
            thread.join()
        end = monotonic()
        dur = end - start
        assert 0.9 < dur < 3
    finally:
        signal.signal(signal.SIGALRM, old_handler)

    assert interrupt_count[0] > 0
Example #6
0
 def debug_sql(self, sql=None, params=None, use_last_executed_query=False, many=False):
     start = time.monotonic()
     try:
         yield
     finally:
         stop = time.monotonic()
         duration = stop - start
         if use_last_executed_query:
             sql = self.db.ops.last_executed_query(self.cursor, sql, params)
         try:
             times = len(params) if many else ''
         except TypeError:
             # params could be an iterator.
             times = '?'
         self.db.queries_log.append({
             'sql': '%s times: %s' % (times, sql) if many else sql,
             'time': '%.3f' % duration,
         })
         logger.debug(
             '(%.3f) %s; args=%s',
             duration,
             sql,
             params,
             extra={'duration': duration, 'sql': sql, 'params': params},
         )
Example #7
0
    def go():
        router_closed = asyncio.Future()
        dealer_closed = asyncio.Future()
        router, _ = yield from loop.create_zmq_connection(
            lambda: ZmqRouterProtocol(router_closed),
            zmq.ROUTER,
            bind='tcp://127.0.0.1:*')

        addr = next(iter(router.bindings()))
        dealer, _ = yield from loop.create_zmq_connection(
            lambda: ZmqDealerProtocol(count, dealer_closed),
            zmq.DEALER,
            connect=addr)

        msg = b'func', b'\0'*200

        gc.collect()
        t1 = time.monotonic()
        dealer.write(msg)
        yield from dealer_closed
        t2 = time.monotonic()
        gc.collect()
        router.close()
        yield from router_closed
        return t2 - t1
Example #8
0
    def test_call_later_1(self):
        calls = []

        def cb(inc=10, stop=False):
            calls.append(inc)
            self.assertTrue(self.loop.is_running())
            if stop:
                self.loop.call_soon(self.loop.stop)

        self.loop.call_later(0.05, cb)

        # canceled right away
        h = self.loop.call_later(0.05, cb, 100, True)
        self.assertIn('.cb', repr(h))
        h.cancel()
        self.assertIn('cancelled', repr(h))

        self.loop.call_later(0.05, cb, 1, True)
        self.loop.call_later(1000, cb, 1000)  # shouldn't be called

        started = time.monotonic()
        self.loop.run_forever()
        finished = time.monotonic()

        self.assertLess(finished - started, 0.1)
        self.assertGreater(finished - started, 0.04)

        self.assertEqual(calls, [10, 1])

        self.assertFalse(self.loop.is_running())
Example #9
0
def preprocess_pages(db, cur, pool):

    # This is not in itertools, for no good reason.
    def chunked(iterable, n):
        it = iter(iterable)
        while True:
            chunk = tuple(itertools.islice(it, n))
            if not chunk:
                return
            yield chunk

    # There is no good way to hold a cursor open on a read query while
    # simultaneously making commits to one of the tables involved.  We
    # work around this by maintaining a local list of rows to process.
    # We don't just pull out all of the page contents in advance
    # because that would blow out the RAM.

    # We need a base URL for each page. In the cases where more than
    # one URL maps to the same page, we just pick one and hope it
    # doesn't matter.  It is enormously more efficient to do this at
    # the same time as we pull out the list of pages.

    sys.stdout.write("Determining job size...\n")
    sys.stdout.flush()

    cur.execute("SELECT h.id, s.url"
                "  FROM collection.capture_html_content h"
                "  JOIN collection.common_crawl_pages c ON c.html_content = h.id"
                "  JOIN collection.url_strings s ON c.url = s.id"
                " WHERE h.extracted IS NULL")
    pages = cur.fetchall()
    total_pages = len(pages)
    if not total_pages:
        return

    processed = 0
    start = time.monotonic()
    sys.stdout.write("Processing 0/{}...\n".format(total_pages))
    for chunk in chunked(pages, 1000):
        with db:
            cur.execute(" SELECT id, content"
                        "  FROM collection.capture_html_content"
                        "  WHERE id = ANY(%s)",
                        ([c[0] for c in chunk],))
            # (id, content) join (id, url) -> (id, content, url)
            # memoryviews cannot go through pickle/unpickle
            content = { r[0] : bytes(r[1]) for r in cur }
            block = [(c[0], content[c[0]], c[1]) for c in chunk]

            for result in pool.imap_unordered(do_content_extraction, block):
                insert_result(cur, result)

        stop = time.monotonic()
        processed += len(block)
        elapsed = stop - start
        remain  = (total_pages - processed)*(elapsed/processed)
        sys.stdout.write("Processed {}/{} in {} remaining {}\n"
                         .format(processed, total_pages,
                                 fmt_interval(elapsed),
                                 fmt_interval(remain)))
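The local `chunked` helper above can be written more compactly, and on Python 3.12+ it is essentially `itertools.batched`; a small sketch under that assumption:

import itertools

def chunked(iterable, n):
    # Equivalent to itertools.batched(iterable, n) on Python 3.12+.
    it = iter(iterable)
    while chunk := tuple(itertools.islice(it, n)):
        yield chunk

print(list(chunked(range(7), 3)))  # [(0, 1, 2), (3, 4, 5), (6,)]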
Example #10
0
 def _get_spad_info(self):
     # Get reference SPAD count and type, returned as a 2-tuple of
     # count and boolean is_aperture.  Based on code from:
     #   https://github.com/pololu/vl53l0x-arduino/blob/master/VL53L0X.cpp
     for pair in ((0x80, 0x01), (0xFF, 0x01), (0x00, 0x00), (0xFF, 0x06)):
         self._write_u8(pair[0], pair[1])
     self._write_u8(0x83, self._read_u8(0x83) | 0x04)
     for pair in ((0xFF, 0x07), (0x81, 0x01), (0x80, 0x01),
                  (0x94, 0x6b), (0x83, 0x00)):
         self._write_u8(pair[0], pair[1])
     start = time.monotonic()
     while self._read_u8(0x83) == 0x00:
         if self.io_timeout_s > 0 and \
            (time.monotonic() - start) >= self.io_timeout_s:
             raise RuntimeError('Timeout waiting for VL53L0X!')
     self._write_u8(0x83, 0x01)
     tmp = self._read_u8(0x92)
     count = tmp & 0x7F
     is_aperture = ((tmp >> 7) & 0x01) == 1
     for pair in ((0x81, 0x00), (0xFF, 0x06)):
         self._write_u8(pair[0], pair[1])
     self._write_u8(0x83, self._read_u8(0x83) & ~0x04)
     for pair in ((0xFF, 0x01), (0x00, 0x01), (0xFF, 0x00), (0x80, 0x00)):
         self._write_u8(pair[0], pair[1])
     return (count, is_aperture)
Example #11
0
 def range(self):
     """Perform a single reading of the range for an object in front of
     the sensor and return the distance in millimeters.
     """
     # Adapted from readRangeSingleMillimeters &
     # readRangeContinuousMillimeters in pololu code at:
     #   https://github.com/pololu/vl53l0x-arduino/blob/master/VL53L0X.cpp
     for pair in ((0x80, 0x01), (0xFF, 0x01), (0x00, 0x00),
                  (0x91, self._stop_variable), (0x00, 0x01), (0xFF, 0x00),
                  (0x80, 0x00), (_SYSRANGE_START, 0x01)):
         self._write_u8(pair[0], pair[1])
     start = time.monotonic()
     while (self._read_u8(_SYSRANGE_START) & 0x01) > 0:
         if self.io_timeout_s > 0 and \
            (time.monotonic() - start) >= self.io_timeout_s:
             raise RuntimeError('Timeout waiting for VL53L0X!')
     start = time.monotonic()
     while (self._read_u8(_RESULT_INTERRUPT_STATUS) & 0x07) == 0:
         if self.io_timeout_s > 0 and \
            (time.monotonic() - start) >= self.io_timeout_s:
             raise RuntimeError('Timeout waiting for VL53L0X!')
     # assumptions: Linearity Corrective Gain is 1000 (default)
     # fractional ranging is not enabled
     range_mm = self._read_u16(_RESULT_RANGE_STATUS + 10)
     self._write_u8(_SYSTEM_INTERRUPT_CLEAR, 0x01)
     return range_mm
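Both VL53L0X snippets above poll a register until it changes or a timeout elapses. A generic sketch of that pattern (names are illustrative, not part of the driver):

import time

def wait_until(predicate, timeout_s, message='Timeout waiting for condition'):
    # Poll `predicate` until it returns a truthy value; raise RuntimeError once
    # `timeout_s` has elapsed. A timeout of 0 or less means wait indefinitely,
    # matching the io_timeout_s convention in the driver code above.
    start = time.monotonic()
    while not predicate():
        if timeout_s > 0 and (time.monotonic() - start) >= timeout_s:
            raise RuntimeError(message)

wait_until(lambda: True, 1.0)  # returns immediately; a real caller would poll a register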
Example #12
0
    def check_parallel_module_init(self, mock_os):
        if imp.lock_held():
            # This triggers on, e.g., from test import autotest.
            raise unittest.SkipTest("can't run when import lock is held")

        done = threading.Event()
        for N in (20, 50) * 3:
            if verbose:
                print("Trying", N, "threads ...", end=' ')
            # Make sure that random and modulefinder get reimported freshly
            for modname in ['random', 'modulefinder']:
                try:
                    del sys.modules[modname]
                except KeyError:
                    pass
            errors = []
            done_tasks = []
            done.clear()
            t0 = time.monotonic()
            with start_threads(threading.Thread(target=task,
                                                args=(N, done, done_tasks, errors,))
                               for i in range(N)):
                pass
            completed = done.wait(10 * 60)
            dt = time.monotonic() - t0
            if verbose:
                print("%.1f ms" % (dt*1e3), flush=True, end=" ")
            dbg_info = 'done: %s/%s' % (len(done_tasks), N)
            self.assertFalse(errors, dbg_info)
            self.assertTrue(completed, dbg_info)
            if verbose:
                print("OK.")
Example #13
0
    def _poll_for_io(self):
        if self._sleeping:
            timeout = self._sleeping[0][0] - time.monotonic()
        else:
            timeout = None

        events = self._selector.select(timeout)
        for key, mask in events:
            task = key.data
            self._selector.unregister(key.fileobj)
            self._reschedule_task(task)

        # Process sleeping tasks (if any)
        if self._sleeping:
            current = time.monotonic()
            while self._sleeping and self._sleeping[0][0] <= current:
                tm, _, task, sleep_type = heapq.heappop(self._sleeping)
                # When a task wakes, verify that the timeout value matches that stored
                # on the task. If it differs, it means that the task completed its
                # operation, was cancelled, or is no longer concerned with this
                # sleep operation.  In that case, we do nothing
                if tm == task.timeout:
                    if sleep_type == 'sleep':
                        self._reschedule_task(task)
                    elif sleep_type == 'timeout':
                        self._cancel_task(task, exc=TimeoutError)
Example #14
0
	def __next__(self):
		with self.lock:
			t = time.monotonic()
			if t < self.next_yield:
				time.sleep(self.next_yield - t)
				t = time.monotonic()
			self.next_yield = t + self.interval
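The `__next__` above is shown without its class. A self-contained sketch of the surrounding rate limiter it implies (the class name and constructor are assumptions):

import threading
import time

class RateLimiter:
    # Iterating over a RateLimiter yields at most once per `interval` seconds.
    def __init__(self, interval):
        self.interval = interval
        self.lock = threading.Lock()
        self.next_yield = 0.0

    def __iter__(self):
        return self

    def __next__(self):
        with self.lock:
            t = time.monotonic()
            if t < self.next_yield:
                time.sleep(self.next_yield - t)
                t = time.monotonic()
            self.next_yield = t + self.interval

limiter = RateLimiter(0.1)
for _ in range(3):
    next(limiter)  # each call returns no sooner than 0.1s after the previous one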
Example #15
0
 def should_terminate(self):
     if self.stopping_start is None:
         self.stopping_start = monotonic()
         return False
     else:
         dt = monotonic() - self.stopping_start
         return dt if dt >= ACTOR_ACTION_TIMEOUT else False
Example #16
0
    def test_sigwaitinfo(self):
        signum = signal.SIGUSR1
        pid = os.getpid()

        old_handler = signal.signal(signum, lambda *args: None)
        self.addCleanup(signal.signal, signum, old_handler)

        code = '\n'.join((
            'import os, time',
            'pid = %s' % os.getpid(),
            'signum = %s' % int(signum),
            'sleep_time = %r' % self.sleep_time,
            'time.sleep(sleep_time)',
            'os.kill(pid, signum)',
        ))

        t0 = time.monotonic()
        proc = self.subprocess(code)
        with kill_on_error(proc):
            # parent
            signal.sigwaitinfo([signum])
            dt = time.monotonic() - t0
            self.assertEqual(proc.wait(), 0)

        self.assertGreaterEqual(dt, self.sleep_time)
Example #17
0
def do_capture(url, proxy, loop):
    result = CaptureResult(url)
    if result.status:
        return result

    start = time.monotonic()
    proc = yield from asyncio.create_subprocess_exec(
        *proxy.adjust_command([
            "isolate",
            "ISOL_RL_MEM=unlimited",
            "ISOL_RL_STACK=8388608",
            "PHANTOMJS_DISABLE_CRASH_DUMPS=1",
            "MALLOC_CHECK_=0",
            "phantomjs",
            "--local-url-access=no",
            "--load-images=false",
            pj_trace_redir,
            "--capture",
            result.original_url
        ]),
        stdin  = subprocess.DEVNULL,
        stdout = subprocess.PIPE,
        stderr = subprocess.PIPE,
        loop   = loop)

    stdout, stderr = yield from proc.communicate()
    elapsed = time.monotonic() - start
    result.set_result(proc.returncode, stdout, stderr, elapsed)
    return result
Example #18
0
 def reset(self):
     if time.monotonic() > self.bounce_filter_time + self.bounce_filter_timer:
         if self.toggle_on_input and not self.state:
             self.state = True
         else:
             self.state = False
         self.bounce_filter_timer = time.monotonic()
Example #19
0
    def recv_timeout(self, timeout):
        if timeout is None:
            return self.recv()

        time_now = time.monotonic() if six.PY3 else time.time()
        #: calculate until when it may take
        timeout_until = time_now + timeout

        while time_now < timeout_until:
            time_left = timeout_until - time_now

            socks = dict(self.pollin.poll(time_left * 1000))  # poll needs milliseconds
            if socks.get(self.socket) == zmq.POLLIN:
                try:
                    reply = self.recv()
                    # No error? Then it is the answer that we wanted. Good.
                    return reply
                except UnknownMessageId:
                    # Okay, false alarm. Reset the current time and try again.
                    time_now = time.monotonic() if six.PY3 else time.time()
                    continue
            else:
                # answer did not arrive in time
                raise ZMQTimeout()
        raise ZMQTimeout()
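A minimal sketch of the unit detail noted in the comment above: zmq.Poller.poll() takes its timeout in milliseconds, hence the `time_left * 1000`:

import zmq

ctx = zmq.Context.instance()
sock = ctx.socket(zmq.PULL)
sock.bind('inproc://poll-demo')
poller = zmq.Poller()
poller.register(sock, zmq.POLLIN)
socks = dict(poller.poll(0.25 * 1000))  # wait up to 250 ms
print(sock in socks)                    # False: nothing was ever sent
sock.close()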
Example #20
0
    def execute_cli_command(self, command, timeout=None):
        # While the command is being executed, incoming frames will be lost.
        # The SLCAN driver from PyUAVCAN goes to great lengths to properly separate CLI response lines
        # from SLCAN messages in real time with minimal additional latency, so use it if you care about this.
        timeout = self._resolve_timeout(timeout)
        self.port.writeTimeout = timeout
        command += '\r\n'
        self._write(command)

        deadline = time.monotonic() + (timeout if timeout is not None else 999999999)
        self.port.timeout = 1
        response = bytes()

        while True:
            if time.monotonic() > deadline:
                raise TimeoutException('SLCAN CLI response timeout; command: %r' % command)

            b = self.port.read()
            if b == self.CLI_END_OF_TEXT:
                break
            if b:
                response += b

        # Removing SLCAN lines from response
        return re.sub(r'.*\r[^\n]', '', response.decode()).strip().replace(command, '')
Example #21
0
    def zipTest(self, f, compression):
        # Create the ZIP archive.
        zipfp = zipfile.ZipFile(f, "w", compression)

        # It will contain enough copies of self.data to reach about 6 GiB of
        # raw data to store.
        filecount = 6*1024**3 // len(self.data)

        next_time = time.monotonic() + _PRINT_WORKING_MSG_INTERVAL
        for num in range(filecount):
            zipfp.writestr("testfn%d" % num, self.data)
            # Print still working message since this test can be really slow
            if next_time <= time.monotonic():
                next_time = time.monotonic() + _PRINT_WORKING_MSG_INTERVAL
                print((
                   '  zipTest still writing %d of %d, be patient...' %
                   (num, filecount)), file=sys.__stdout__)
                sys.__stdout__.flush()
        zipfp.close()

        # Read the ZIP archive
        zipfp = zipfile.ZipFile(f, "r", compression)
        for num in range(filecount):
            self.assertEqual(zipfp.read("testfn%d" % num), self.data)
            # Print still working message since this test can be really slow
            if next_time <= time.monotonic():
                next_time = time.monotonic() + _PRINT_WORKING_MSG_INTERVAL
                print((
                   '  zipTest still reading %d of %d, be patient...' %
                   (num, filecount)), file=sys.__stdout__)
                sys.__stdout__.flush()
        zipfp.close()
Example #22
0
def timer():
    tic = monotonic()
    toc = None
    try:
        yield lambda : toc - tic
    finally:
        toc = monotonic()
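The generator above is presumably meant to be wrapped with contextlib.contextmanager (the decorator sits outside the snippet); a usage sketch under that assumption:

from contextlib import contextmanager
from time import monotonic

@contextmanager
def timer():
    tic = monotonic()
    toc = None
    try:
        # The yielded callable only gives a meaningful value after the
        # with-block has exited and `toc` has been set in the finally clause.
        yield lambda: toc - tic
    finally:
        toc = monotonic()

with timer() as elapsed:
    sum(range(1_000_000))
print('took %.4fs' % elapsed())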
Example #23
0
    def send_messages(self, message_batch):
        if not self._init_es():
            return
        start_time = time.monotonic()
        try:
            actions = []
            for msg in message_batch:
                message = json.loads(msg.decode("utf8"))
                timestamp = message.get("timestamp")
                if "__REALTIME_TIMESTAMP" in message:
                    timestamp = datetime.datetime.utcfromtimestamp(message["__REALTIME_TIMESTAMP"])
                else:
                    timestamp = datetime.datetime.utcnow()

                message["timestamp"] = timestamp
                index_name = "{}-{}".format(self.index_name, datetime.datetime.date(timestamp))
                if index_name not in self.indices:
                    self.create_index_and_mappings(index_name)

                actions.append({
                    "_index": index_name,
                    "_type": "journal_msg",
                    "_source": message,
                })
            if actions:
                helpers.bulk(self.es, actions)
                self.log.debug("Sent %d log events to ES, took: %.2fs",
                               len(message_batch), time.monotonic() - start_time)
        except Exception as ex:  # pylint: disable=broad-except
            self.log.warning("Problem sending logs to ES: %r", ex)
            return False
        return True
Example #24
0
    def spin(self, timeout=None):
        """
        Runs background processes until timeout expires.
        Note that all processing is implemented in one thread.
        :param timeout: The method will return once this amount of time expires.
                        If None, the method will never return.
                        If zero, the method will handle only those events that are ready, then return immediately.
        """
        if timeout != 0:
            deadline = (time.monotonic() + timeout) if timeout is not None else sys.float_info.max

            def execute_once():
                next_event_at = self._poll_scheduler_and_get_next_deadline()
                if next_event_at is None:
                    next_event_at = sys.float_info.max

                read_timeout = min(next_event_at, deadline) - time.monotonic()
                read_timeout = max(read_timeout, 0)
                read_timeout = min(read_timeout, 1)

                frame = self._can_driver.receive(read_timeout)
                if frame:
                    self._recv_frame(frame)

            execute_once()
            while time.monotonic() < deadline:
                execute_once()
        else:
            while True:
                frame = self._can_driver.receive(0)
                if frame:
                    self._recv_frame(frame)
                else:
                    break
            self._poll_scheduler_and_get_next_deadline()
Example #25
0
    def _select_next_server(self):
        """
        Looks up in the server pool for an available server
        and attempts to connect.
        """
        srv = None
        now = time.monotonic()
        for s in self._server_pool:
            if s.reconnects > self.options["max_reconnect_attempts"]:
                continue
            if s.did_connect and now > s.last_attempt + self.options["reconnect_time_wait"]:
                yield from asyncio.sleep(self.options["reconnect_time_wait"], loop=self._loop)
            try:
                s.last_attempt = time.monotonic()
                r, w = yield from asyncio.open_connection(
                    s.uri.hostname,
                    s.uri.port,
                    loop=self._loop,
                    limit=DEFAULT_BUFFER_SIZE)
                srv = s
                self._io_reader = r
                self._io_writer = w
                s.did_connect = True
                break
            except Exception as e:
                self._err = e

        if srv is None:
            raise ErrNoServers
        self._current_server = srv
Example #26
0
    def run_baton_query(self, baton_binary: BatonBinary, program_arguments: List[str]=None, input_data: Any=None) \
            -> List[Dict]:
        """
        Runs a baton query.
        :param baton_binary: the baton binary to use
        :param program_arguments: arguments to give to the baton binary
        :param input_data: input data to the baton binary
        :return: parsed serialization returned by baton
        """
        if program_arguments is None:
            program_arguments = []

        baton_binary_location = os.path.join(self._baton_binaries_directory, baton_binary.value)
        program_arguments = [baton_binary_location] + program_arguments

        _logger.info("Running baton command: '%s' with data '%s'" % (program_arguments, input_data))
        start_at = time.monotonic()
        baton_out = self._run_command(program_arguments, input_data=input_data)
        time_taken_to_run_query = time.monotonic() - start_at
        _logger.debug("baton output (took %s seconds, wall time): %s" % (time_taken_to_run_query, baton_out))

        if len(baton_out) == 0:
            return []
        if len(baton_out) > 0 and baton_out[0] != '[':
            # If information about multiple files is returned, baton does not return valid JSON - it returns a line
            # separated list of JSON, where each line corresponds to a different file
            baton_out = "[%s]" % baton_out.replace('\n', ',')

        baton_out_as_json = json.loads(baton_out)
        BatonRunner._raise_any_errors_given_in_baton_out(baton_out_as_json)

        return baton_out_as_json
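A tiny illustration of the newline-separated-JSON handling described in the comment above (the sample output string is made up):

import json

baton_out = '{"file": "a.txt"}\n{"file": "b.txt"}'   # hypothetical multi-file output
if len(baton_out) > 0 and baton_out[0] != '[':
    baton_out = "[%s]" % baton_out.replace('\n', ',')
print(json.loads(baton_out))  # [{'file': 'a.txt'}, {'file': 'b.txt'}]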
Example #27
0
    def _handle_status(self, msg):
        """
        Reimplemented to refresh the namespacebrowser after kernel
        restarts
        """
        state = msg['content'].get('execution_state', '')
        msg_type = msg['parent_header'].get('msg_type', '')
        if state == 'starting':
            # This is needed to show the time a kernel
            # has been alive in each console.
            self.ipyclient.t0 = time.monotonic()
            self.ipyclient.timer.timeout.connect(self.ipyclient.show_time)
            self.ipyclient.timer.start(1000)

            # This handles restarts when the kernel dies
            # unexpectedly
            if not self._kernel_is_starting:
                self._kernel_is_starting = True
        elif state == 'idle' and msg_type == 'shutdown_request':
            # This handles restarts asked by the user
            if self.namespacebrowser is not None:
                self.set_namespace_view_settings()
                self.refresh_namespacebrowser()
            self.ipyclient.t0 = time.monotonic()
        else:
            super(NamepaceBrowserWidget, self)._handle_status(msg)
Example #28
0
 def dispatch(self, wdelay=0):
     """ xxx """
     self.round += 1
     start = time.monotonic()
     with self._lock:
         xqueue = self._queue[:]
         while True:
             if not xqueue:
                 break
             event = xqueue[0]
             now = time.monotonic()
             if event.atime > (now + wdelay):
                 time.sleep(wdelay)
                 break
             elif event.atime <= (now + wdelay) and event.atime > now:
                 # D("sleep {}".format(event.atime - now))
                 time.sleep(event.atime - now)
             else:  # event.atime <= now:
                 xqueue.pop(0)
                 more = event._call()
                 if more is not None and more > 0:
                     event.atime = time.monotonic() + event.delay
                 else:
                     self._queue.pop(0)
         heapq.heapify(self._queue)
     return time.monotonic() - start
Example #29
0
    def run(self):
        while True:
            item = None
            try:
                item = self.work_queue.get(timeout=2)
            except Empty:
                pass

            if item:
                self.transmit_buffer.append(item)
                self.last_time = time.monotonic()

            delta = time.monotonic() - self.last_time

            if self.work_queue.empty() and len(self.transmit_buffer) > 0:
                if (delta > 30) or (len(self.transmit_buffer) > 1):
                    self._transmit.set()

            if self._transmit.is_set() and self.work_queue.empty():
                self.transmit()

            if self.stopped():
                while len(self.transmit_buffer) > 0:
                    self.transmit()
                return
Example #30
0
def test_monotonic():
    times = [time.monotonic() for _ in range(100)]
    for t1, t2 in zip(times[:-1], times[1:]):
        assert t1 <= t2
Example #31
0
def build_examples(variant):
    global exit_status, success_count, fail_count, skip_count, build_format, build_separator

    print('\n')
    print(build_separator)
    print('| {:^79} |'.format('Board ' + variant))
    print(build_separator)
    print((build_format + '| {:6} |').format('Library', 'Example', 'Result',
                                             'Time'))
    print(build_separator)

    fqbn = "adafruit:nrf52:{}:softdevice={},debug=l0".format(
        variant, 's140v6' if variant != 'feather52832' else 's132v6')

    for sketch in glob.iglob('libraries/**/*.ino', recursive=True):
        start_time = time.monotonic()

        # Skip if contains: ".board.test.skip" or ".all.test.skip"
        # Skip if not contains: ".board.test.only" for a specific board
        sketchdir = os.path.dirname(sketch)
        if os.path.exists(sketchdir + '/.all.test.skip') or os.path.exists(
                sketchdir + '/.' + variant + '.test.skip'):
            success = "\033[33mskipped\033[0m  "
        elif glob.glob(sketchdir + "/.*.test.only") and not os.path.exists(
                sketchdir + '/.' + variant + '.test.only'):
            success = "\033[33mskipped\033[0m  "
        else:
            # TODO - preferably, would have STDERR show up in **both** STDOUT and STDERR.
            #        preferably, would use Python logging handler to get both distinct outputs and one merged output
            #        for now, split STDERR when building with all warnings enabled, so can detect warning/error output.
            if all_warnings:
                build_result = subprocess.run(
                    "arduino-cli compile --warnings all --fqbn {} {}".format(
                        fqbn, sketch),
                    shell=True,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE)
            else:
                build_result = subprocess.run(
                    "arduino-cli compile --warnings default --fqbn {} {}".
                    format(fqbn, sketch),
                    shell=True,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.STDOUT)

            # get stderr into a form where len(warningLines) indicates a true warning was output to stderr
            warningLines = []
            if all_warnings and build_result.stderr:
                tmpWarningLines = build_result.stderr.decode(
                    "utf-8").splitlines()
                warningLines = list(
                    filter(errorOutputFilter, (tmpWarningLines)))

            if build_result.returncode != 0:
                exit_status = build_result.returncode
                success = "\033[31mfailed\033[0m   "
                fail_count += 1
            elif len(warningLines) != 0:
                exit_status = -1
                success = "\033[31mwarnings\033[0m "
                fail_count += 1
            else:
                success = "\033[32msucceeded\033[0m"
                success_count += 1

        build_duration = time.monotonic() - start_time

        print((build_format + '| {:5.2f}s |').format(
            sketch.split(os.path.sep)[1], os.path.basename(sketch), success,
            build_duration))

        if success != "\033[33mskipped\033[0m  ":
            if build_result.returncode != 0:
                print(build_result.stdout.decode("utf-8"))
                if (build_result.stderr):
                    print(build_result.stderr.decode("utf-8"))
            if len(warningLines) != 0:
                for line in warningLines:
                    print(line)
        else:
            skip_count += 1
Example #32
0
    def __init__(self,
                 host,
                 port,
                 *,
                 client_id='aiokafka',
                 request_timeout_ms=40000,
                 api_version=(0, 8, 2),
                 ssl_context=None,
                 security_protocol='PLAINTEXT',
                 max_idle_ms=None,
                 on_close=None,
                 sasl_mechanism=None,
                 sasl_plain_password=None,
                 sasl_plain_username=None,
                 sasl_kerberos_service_name='kafka',
                 sasl_kerberos_domain_name=None,
                 sasl_oauth_token_provider=None,
                 version_hint=None):
        loop = get_running_loop()

        if sasl_mechanism == "GSSAPI":
            assert gssapi is not None, "gssapi library required"

        if sasl_mechanism == "OAUTHBEARER":
            if sasl_oauth_token_provider is None or \
                    not isinstance(
                        sasl_oauth_token_provider, AbstractTokenProvider):
                raise ValueError("sasl_oauth_token_provider needs to be \
                    provided implementing aiokafka.abc.AbstractTokenProvider")
            assert callable(getattr(
                sasl_oauth_token_provider, "token", None)), (
                    'sasl_oauth_token_provider must implement method #token()')

        self._loop = loop
        self._host = host
        self._port = port
        self._request_timeout = request_timeout_ms / 1000
        self._api_version = api_version
        self._client_id = client_id
        self._ssl_context = ssl_context
        self._security_protocol = security_protocol
        self._sasl_mechanism = sasl_mechanism
        self._sasl_plain_username = sasl_plain_username
        self._sasl_plain_password = sasl_plain_password
        self._sasl_kerberos_service_name = sasl_kerberos_service_name
        self._sasl_kerberos_domain_name = sasl_kerberos_domain_name
        self._sasl_oauth_token_provider = sasl_oauth_token_provider

        # Version hint is the version determined by initial client bootstrap
        self._version_hint = version_hint
        self._version_info = VersionInfo({})

        self._reader = self._writer = self._protocol = None
        # Even on small size seems to be a bit faster than list.
        # ~2x on size of 2 in Python3.6
        self._requests = collections.deque()
        self._read_task = None
        self._correlation_id = 0
        self._closed_fut = None

        self._max_idle_ms = max_idle_ms
        self._last_action = time.monotonic()
        self._idle_handle = None

        self._on_close_cb = on_close

        if loop.get_debug():
            self._source_traceback = traceback.extract_stack(sys._getframe(1))
Example #33
0
 def recalculate_delays(self):
     """ calls update_averages() on ServerSource.statistics (GlobalStatistics)
         and WindowSource.statistics (WindowPerformanceStatistics) for each window id in calculate_window_ids,
         this runs in the worker thread.
     """
     self.calculate_timer = 0
     if self.is_closed():
         return
     now = monotonic()
     self.calculate_last_time = now
     p = self.protocol
     if not p:
         return
     conn = p._conn
     if not conn:
         return
     #we can't assume that 'self' is a full ClientConnection object:
     stats = getattr(self, "statistics", None)
     if stats:
         stats.bytes_sent.append((now, conn.output_bytecount))
         stats.update_averages()
     self.update_bandwidth_limits()
     wids = tuple(self.calculate_window_ids
                  )  #make a copy so we don't clobber new wids
     focus = self.get_focus()
     sources = self.window_sources.items()
     maximized_wids = tuple(wid for wid, source in sources
                            if source is not None and source.maximized)
     fullscreen_wids = tuple(wid for wid, source in sources
                             if source is not None and source.fullscreen)
     log(
         "recalculate_delays() wids=%s, focus=%s, maximized=%s, fullscreen=%s",
         wids, focus, maximized_wids, fullscreen_wids)
     for wid in wids:
         #this is safe because we only add to this set from other threads:
         self.calculate_window_ids.remove(wid)
         self.calculate_window_pixels.pop(wid, None)
         ws = self.window_sources.get(wid)
         if ws is None:
             continue
         try:
             ws.statistics.update_averages()
             ws.calculate_batch_delay(
                 wid == focus,
                 len(fullscreen_wids) > 0 and wid not in fullscreen_wids,
                 len(maximized_wids) > 0 and wid not in maximized_wids)
             ws.reconfigure()
         except Exception:
             log.error("error on window %s", wid, exc_info=True)
         if self.is_closed():
             return
         #allow other threads to run
         #(ideally this would be a low priority thread)
         sleep(0)
     #calculate weighted average as new global default delay:
     wdimsum, wdelay, tsize, tcount = 0, 0, 0, 0
     for ws in tuple(self.window_sources.values()):
         if ws.batch_config.last_updated <= 0:
             continue
         w, h = ws.window_dimensions
         tsize += w * h
         tcount += 1
         time_w = 2.0 + (now - ws.batch_config.last_updated
                         )  #add 2 seconds to even things out
         weight = int(w * h * time_w)
         wdelay += ws.batch_config.delay * weight
         wdimsum += weight
     if wdimsum > 0 and tcount > 0:
         #weighted delay:
         delay = wdelay // wdimsum
         self.global_batch_config.last_delays.append((now, delay))
         self.global_batch_config.delay = delay
         #store the delay as a normalized value per megapixel
         #so we can adapt it to different window sizes:
         avg_size = tsize // tcount
         ratio = sqrt(1000000.0 / avg_size)
         normalized_delay = int(delay * ratio)
         self.global_batch_config.delay_per_megapixel = normalized_delay
         log(
             "delay_per_megapixel=%i, delay=%i, for wdelay=%i, avg_size=%i, ratio=%.2f",
             normalized_delay, delay, wdelay, avg_size, ratio)
Example #34
0
rfm9x = adafruit_rfm9x.RFM9x(spi, CS, RESET, RADIO_FREQ_MHZ)

# enable CRC checking
rfm9x.enable_crc = True
# set node addresses
rfm9x.node = 2
rfm9x.destination = 1
# initialize counter
counter = 0
# send a broadcast message from my_node with ID = counter
rfm9x.send(bytes("startup message from node {} ".format(rfm9x.node), "UTF-8"))

# Wait to receive packets.
print("Waiting for packets...")
# initialize flag and timer
time_now = time.monotonic()
while True:
    # Look for a new packet: only accept if addressed to my_node
    packet = rfm9x.receive(with_header=True)
    # If no packet was received during the timeout then None is returned.
    if packet is not None:
        # Received a packet!
        # Print out the raw bytes of the packet:
        print("Received (raw header):", [hex(x) for x in packet[0:4]])
        print("Received (raw payload): {0}".format(packet[4:]))
        print("Received RSSI: {0}".format(rfm9x.last_rssi))
        # send reading after any packet received
        counter = counter + 1
        # after 10 messages send a response to destination_node from my_node with ID = counter&0xff
        if counter % 10 == 0:
            time.sleep(0.5)  # brief delay before responding
Example #35
0
    def request(self, method, url, name=None, catch_response=False, **kwargs):
        """
        Constructs and sends a :py:class:`requests.Request`.
        Returns :py:class:`requests.Response` object.

        :param method: method for the new :class:`Request` object.
        :param url: URL for the new :class:`Request` object.
        :param name: (optional) An argument that can be specified to use as a label in Locust's statistics instead of the URL path.
          This can be used to group different URLs that are requested into a single entry in Locust's statistics.
        :param catch_response: (optional) Boolean argument that, if set, can be used to make a request return a context manager
          to work as argument to a with statement. This will allow the request to be marked as a fail based on the content of the
          response, even if the response code is ok (2xx). The opposite also works: one can use catch_response to catch a request
          and then mark it as successful even if the response code was not (e.g. 500 or 404).
        :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
        :param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`.
        :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
        :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
        :param files: (optional) Dictionary of ``'filename': file-like-objects`` for multipart encoding upload.
        :param auth: (optional) Auth tuple or callable to enable Basic/Digest/Custom HTTP Auth.
        :param timeout: (optional) How long in seconds to wait for the server to send data before giving up, as a float,
            or a (`connect timeout, read timeout <user/advanced.html#timeouts>`_) tuple.
        :type timeout: float or tuple
        :param allow_redirects: (optional) Set to True by default.
        :type allow_redirects: bool
        :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
        :param stream: (optional) whether to immediately download the response content. Defaults to ``False``.
        :param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
        :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
        """

        # prepend url with hostname unless it's already an absolute URL
        url = self._build_url(url)

        # store meta data that is used when reporting the request to locust's statistics
        request_meta = {}

        # set up pre_request hook for attaching meta data to the request object
        request_meta["method"] = method
        request_meta["start_time"] = time.monotonic()

        response = self._send_request_safe_mode(method, url, **kwargs)

        # record the consumed time
        request_meta["response_time"] = (time.monotonic() -
                                         request_meta["start_time"]) * 1000

        request_meta["name"] = name or (response.history
                                        and response.history[0]
                                        or response).request.path_url
        # get the length of the content, but if the argument stream is set to True, we take
        # the size from the content-length header, in order to not trigger fetching of the body
        if kwargs.get("stream", False):
            request_meta["content_size"] = int(
                response.headers.get("content-length") or 0)
        else:
            request_meta["content_size"] = len(response.content or b"")

        if catch_response:
            response.locust_request_meta = request_meta
            return ResponseContextManager(response,
                                          request_success=self.request_success,
                                          request_failure=self.request_failure)
        else:
            if name:
                # Since we use the Exception message when grouping failures, in order to not get
                # multiple failure entries for different URLs for the same name argument, we need
                # to temporarily override the response.url attribute
                orig_url = response.url
                response.url = name
            try:
                response.raise_for_status()
            except RequestException as e:
                self.request_failure.fire(
                    request_type=request_meta["method"],
                    name=request_meta["name"],
                    response_time=request_meta["response_time"],
                    response_length=request_meta["content_size"],
                    exception=e,
                )
            else:
                self.request_success.fire(
                    request_type=request_meta["method"],
                    name=request_meta["name"],
                    response_time=request_meta["response_time"],
                    response_length=request_meta["content_size"],
                )
            if name:
                response.url = orig_url
            return response
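A hypothetical usage sketch of the catch_response behaviour described in the docstring, written against the current Locust API (the snippet above is from an older Locust version, so the class name and host below are assumptions):

from locust import HttpUser, task

class QuickCheck(HttpUser):
    host = "http://localhost:8080"  # assumed target

    @task
    def missing_page(self):
        # catch_response=True returns a context manager, letting us decide
        # success/failure ourselves instead of relying on the status code.
        with self.client.request("GET", "/missing", name="missing", catch_response=True) as resp:
            if resp.status_code == 404:
                resp.success()  # a 404 is expected here, so count it as a pass
            else:
                resp.failure("unexpected status %s" % resp.status_code)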
Example #36
0
def timed_tune_run(name: str,
                   num_samples: int,
                   results_per_second: int = 1,
                   trial_length_s: int = 1,
                   max_runtime: int = 300,
                   checkpoint_freq_s: int = -1,
                   checkpoint_size_b: int = 0,
                   **tune_kwargs):
    durable = "sync_config" in tune_kwargs and \
              tune_kwargs["sync_config"].upload_dir and \
              tune_kwargs["sync_config"].upload_dir.startswith("s3://")

    sleep_time = 1. / results_per_second
    num_iters = int(trial_length_s / sleep_time)
    checkpoint_iters = -1
    if checkpoint_freq_s >= 0:
        checkpoint_iters = int(checkpoint_freq_s / sleep_time)

    config = {
        "score": tune.uniform(0., 1.),
        "num_iters": num_iters,
        "sleep_time": sleep_time,
        "checkpoint_iters": checkpoint_iters,
        "checkpoint_size_b": checkpoint_size_b,
    }

    print(f"Starting benchmark with config: {config}")

    run_kwargs = {"reuse_actors": True, "verbose": 2}
    run_kwargs.update(tune_kwargs)

    _train = function_trainable

    aws_key_id = os.getenv("AWS_ACCESS_KEY_ID", "")
    aws_secret = os.getenv("AWS_SECRET_ACCESS_KEY", "")
    aws_session = os.getenv("AWS_SESSION_TOKEN", "")

    if durable:

        class AwsDurableTrainable(TestDurableTrainable):
            AWS_ACCESS_KEY_ID = aws_key_id
            AWS_SECRET_ACCESS_KEY = aws_secret
            AWS_SESSION_TOKEN = aws_session

            def setup_env(self):
                if self.AWS_ACCESS_KEY_ID:
                    os.environ["AWS_ACCESS_KEY_ID"] = self.AWS_ACCESS_KEY_ID
                if self.AWS_SECRET_ACCESS_KEY:
                    os.environ[
                        "AWS_SECRET_ACCESS_KEY"] = self.AWS_SECRET_ACCESS_KEY
                if self.AWS_SESSION_TOKEN:
                    os.environ["AWS_SESSION_TOKEN"] = self.AWS_SESSION_TOKEN

        _train = AwsDurableTrainable
        run_kwargs["checkpoint_freq"] = checkpoint_iters

    start_time = time.monotonic()
    analysis = tune.run(
        _train,
        config=config,
        num_samples=num_samples,
        raise_on_failed_trial=False,
        **run_kwargs)
    time_taken = time.monotonic() - start_time

    result = {
        "time_taken": time_taken,
        "trial_states": dict(
            Counter([trial.status for trial in analysis.trials])),
        "last_update": time.time()
    }

    test_output_json = os.environ.get("TEST_OUTPUT_JSON",
                                      "/tmp/tune_test.json")
    with open(test_output_json, "wt") as f:
        json.dump(result, f)

    if time_taken > max_runtime:
        print(f"The {name} test took {time_taken:.2f} seconds, but should not "
              f"have exceeded {max_runtime:.2f} seconds. Test failed. \n\n"
              f"--- FAILED: {name.upper()} ::: "
              f"{time_taken:.2f} > {max_runtime:.2f} ---")
    else:
        print(f"The {name} test took {time_taken:.2f} seconds, which "
              f"is below the budget of {max_runtime:.2f} seconds. "
              f"Test successful. \n\n"
              f"--- PASSED: {name.upper()} ::: "
              f"{time_taken:.2f} <= {max_runtime:.2f} ---")
Example #37
0
# Initialize the pyportal object and let us know what data to fetch and where
# to display it
pyportal = PyPortal(url=DATA_SOURCE,
                    json_path=DATA_LOCATION,
                    status_neopixel=board.NEOPIXEL,
                    default_bg=0x000000)


gfx = openweather_graphics.OpenWeather_Graphics(pyportal.splash)


localtile_refresh = None
weather_refresh = None
while True:
    # only query the online time once per hour (and on first run)
    if (not localtile_refresh) or (time.monotonic() - localtile_refresh) > 3600:
        try:
            print("Getting time from internet!")
            pyportal.get_local_time()
            localtile_refresh = time.monotonic()
        except RuntimeError as e:
            print("Some error occured, retrying! -", e)
            continue
    try:
        value = pyportal.fetch()

        gfx.display_time(value)
        weather_refresh = time.monotonic()

        gfx.update_time()
        time.sleep(20)
Example #38
0
        r = p[1]
        replaced = p[2]

        #print(steps, r, pos)

        if len(replaced) < min_mol:
            min_mol = len(replaced)
            print('new min', min_mol, replaced)

        if replaced == tgt:
            print('matched!', steps + 1)
            quit()
            amts.append(steps + 1)
            if steps + 1 < min_steps:
                min_steps = steps + 1
        elif len(replaced) > len(curmol):
            print('longer', replaced, curmol)
        else:
            amts.append(recur(replaced, steps + 1))

    #if nfound == 0:
    #    print('eol', len(curmol), steps, len(unique_ones), curmol)

    return 99999999 if len(amts) == 0 else min(amts)


start_time = time.monotonic()
steps = recur(end, 0)
print(steps)
end_time = time.monotonic()
print(timedelta(seconds=end_time - start_time))
Example #39
0
        print((build_format + '| {:5.2f}s |').format(
            sketch.split(os.path.sep)[1], os.path.basename(sketch), success,
            build_duration))

        if success != "\033[33mskipped\033[0m  ":
            if build_result.returncode != 0:
                print(build_result.stdout.decode("utf-8"))
                if (build_result.stderr):
                    print(build_result.stderr.decode("utf-8"))
            if len(warningLines) != 0:
                for line in warningLines:
                    print(line)
        else:
            skip_count += 1


build_time = time.monotonic()

for board in build_boards:
    build_examples(board)

print(build_separator)
build_time = time.monotonic() - build_time
print(
    "Build Summary: {} \033[32msucceeded\033[0m, {} \033[31mfailed\033[0m, {} \033[33mskipped\033[0m and took {:.2f}s"
    .format(success_count, fail_count, skip_count, build_time))
print(build_separator)

sys.exit(exit_status)
Example #40
0
 def fetch_context(self):
     self._fetch_count += 1
     yield
     self._fetch_count -= 1
     if self._fetch_count == 0:
         self._last_fetch_ended = time.monotonic()
Example #41
0
 def touch_last_updated(self):
     self.last_updated = time.monotonic()
Example #42
0
 def _make_report(self) -> Tuple[list, float]:
     total_duration = time.monotonic() - self.start_time
     report = [[a, d, 100. * np.sum(d) / total_duration]
               for a, d in self.recorded_durations.items()]
     report.sort(key=lambda x: x[2], reverse=True)
     return report, total_duration
Example #43
0
 def fetcher_idle_time(self):
     """ How much time (in seconds) spent without consuming any records """
     if self._fetch_count == 0:
         return time.monotonic() - self._last_fetch_ended
     else:
         return 0
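The fetch_context and fetcher_idle_time methods above clearly belong to the same object; a self-contained sketch of how they fit together (the class name and the contextmanager wrapping are assumptions):

import time
from contextlib import contextmanager

class FetchTracker:
    def __init__(self):
        self._fetch_count = 0
        self._last_fetch_ended = time.monotonic()

    @contextmanager
    def fetch_context(self):
        # Mark a fetch as in progress; when the last concurrent fetch ends,
        # remember the time so idle time can be measured from it.
        self._fetch_count += 1
        try:
            yield
        finally:
            self._fetch_count -= 1
            if self._fetch_count == 0:
                self._last_fetch_ended = time.monotonic()

    def fetcher_idle_time(self):
        # Seconds spent without any fetch in progress.
        if self._fetch_count == 0:
            return time.monotonic() - self._last_fetch_ended
        return 0

tracker = FetchTracker()
with tracker.fetch_context():
    time.sleep(0.01)
print(tracker.fetcher_idle_time())  # small: measured from when the fetch ended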
Example #44
0
def sync_main():
    init = time.monotonic()
    list(map(sync_request, sites))
    print(f'total time elapsed: {time.monotonic() - init}')
Example #45
0
def simple_burst(
        profile_file, duration, framesize, rate, warmup_time, port_0, port_1,
        latency, async_start=False, traffic_directions=2, force=False):
    """Send traffic and measure packet loss and latency.

    Procedure:
     - reads the given traffic profile with streams,
     - connects to the T-rex client,
     - resets the ports,
     - removes all existing streams,
     - adds streams from the traffic profile to the ports,
     - if the warm-up time is more than 0, sends the warm-up traffic, reads the
       statistics,
     - clears the statistics from the client,
     - starts the traffic,
     - waits for the defined time (or runs forever if async mode is defined),
     - stops the traffic,
     - reads and displays the statistics and
     - disconnects from the client.

    :param profile_file: A python module with T-rex traffic profile.
    :param framesize: Frame size.
    :param duration: Duration of traffic run in seconds (-1=infinite).
    :param rate: Traffic rate [percentage, pps, bps].
    :param warmup_time: Traffic warm-up time in seconds, 0 = disable.
    :param port_0: Port 0 on the traffic generator.
    :param port_1: Port 1 on the traffic generator.
    :param latency: With latency stats.
    :param async_start: Start the traffic and exit.
    :param traffic_directions: Bidirectional (2) or unidirectional (1) traffic.
    :param force: Force start regardless of ports state.
    :type profile_file: str
    :type framesize: int or str
    :type duration: float
    :type rate: str
    :type warmup_time: float
    :type port_0: int
    :type port_1: int
    :type latency: bool
    :type async_start: bool
    :type traffic_directions: int
    :type force: bool
    """
    client = None
    total_rcvd = 0
    total_sent = 0
    approximated_duration = 0
    approximated_rate = 0
    lost_a = 0
    lost_b = 0
    lat_a = u"-1/-1/-1/"
    lat_b = u"-1/-1/-1/"

    # Read the profile:
    try:
        print(f"### Profile file:\n{profile_file}")
        profile = STLProfile.load(
            profile_file, direction=0, port_id=0, framesize=framesize
        )
        streams = profile.get_streams()
    except STLError as err:
        print(f"Error while loading profile '{profile_file}' {err!r}")
        sys.exit(1)

    try:
        # Create the client:
        client = STLClient()
        # Connect to server:
        client.connect()
        # Prepare our ports (the machine has 0 <--> 1 with static route):
        client.reset(ports=[port_0, port_1])
        client.remove_all_streams(ports=[port_0, port_1])

        if u"macsrc" in profile_file:
            client.set_port_attr(ports=[port_0, port_1], promiscuous=True)
        if isinstance(framesize, int):
            client.add_streams(streams[0], ports=[port_0])
            if traffic_directions > 1:
                client.add_streams(streams[1], ports=[port_1])
        elif isinstance(framesize, str):
            client.add_streams(streams[0:3], ports=[port_0])
            if traffic_directions > 1:
                client.add_streams(streams[3:6], ports=[port_1])
        if latency:
            try:
                if isinstance(framesize, int):
                    client.add_streams(streams[2], ports=[port_0])
                    if traffic_directions > 1:
                        client.add_streams(streams[3], ports=[port_1])
                elif isinstance(framesize, str):
                    latency = False
            except STLError:
                # Disable latency if NIC does not support requested stream type
                print(u"##### FAILED to add latency streams #####")
                latency = False
        ports = [port_0]
        if traffic_directions > 1:
            ports.append(port_1)
        # Warm-up phase:
        if warmup_time > 0:
            # Clear the stats before injecting:
            client.clear_stats()

            # Choose rate and start traffic:
            client.start(ports=ports, mult=rate, duration=warmup_time,
                         force=force)

            # Block until done:
            time_start = time.monotonic()
            client.wait_on_traffic(ports=ports, timeout=warmup_time+30)
            time_stop = time.monotonic()
            approximated_duration = time_stop - time_start

            if client.get_warnings():
                for warning in client.get_warnings():
                    print(warning)

            # Read the stats after the test:
            stats = client.get_stats()

            print(u"##### Warmup statistics #####")
            print(json.dumps(stats, indent=4, separators=(u",", u": ")))

            lost_a = stats[port_0][u"opackets"] - stats[port_1][u"ipackets"]
            if traffic_directions > 1:
                lost_b = stats[port_1][u"opackets"] - stats[port_0][u"ipackets"]

            print(f"\npackets lost from {port_0} --> {port_1}: {lost_a} pkts")
            if traffic_directions > 1:
                print(f"packets lost from {port_1} --> {port_0}: {lost_b} pkts")

        # Clear the stats before injecting:
        client.clear_stats()
        lost_a = 0
        lost_b = 0

        # Choose rate and start traffic:
        client.start(ports=ports, mult=rate, duration=duration, force=force)

        if async_start:
            # For async stop, we need to export the current snapshot.
            xsnap0 = client.ports[0].get_xstats().reference_stats
            print(f"Xstats snapshot 0: {xsnap0!r}")
            if traffic_directions > 1:
                xsnap1 = client.ports[1].get_xstats().reference_stats
                print(f"Xstats snapshot 1: {xsnap1!r}")
        else:
            # Block until done:
            time_start = time.monotonic()
            client.wait_on_traffic(ports=ports, timeout=duration+30)
            time_stop = time.monotonic()
            approximated_duration = time_stop - time_start

            if client.get_warnings():
                for warning in client.get_warnings():
                    print(warning)

            # Read the stats after the test
            stats = client.get_stats()

            print(u"##### Statistics #####")
            print(json.dumps(stats, indent=4, separators=(u",", u": ")))

            lost_a = stats[port_0][u"opackets"] - stats[port_1][u"ipackets"]
            if traffic_directions > 1:
                lost_b = stats[port_1][u"opackets"] - stats[port_0][u"ipackets"]

            # Stats index is not a port number, but "pgid".
            if latency:
                lat_obj = stats[u"latency"][0][u"latency"]
                lat_a = fmt_latency(
                    str(lat_obj[u"total_min"]), str(lat_obj[u"average"]),
                    str(lat_obj[u"total_max"]), str(lat_obj[u"hdrh"]))
                if traffic_directions > 1:
                    lat_obj = stats[u"latency"][1][u"latency"]
                    lat_b = fmt_latency(
                        str(lat_obj[u"total_min"]), str(lat_obj[u"average"]),
                        str(lat_obj[u"total_max"]), str(lat_obj[u"hdrh"]))

            if traffic_directions > 1:
                total_sent = stats[port_0][u"opackets"] + stats[port_1][u"opackets"]
                total_rcvd = stats[port_0][u"ipackets"] + stats[port_1][u"ipackets"]
            else:
                total_sent = stats[port_0][u"opackets"]
                total_rcvd = stats[port_1][u"ipackets"]
            try:
                approximated_rate = total_sent / approximated_duration
            except ZeroDivisionError:
                pass

            print(f"\npackets lost from {port_0} --> {port_1}: {lost_a} pkts")
            if traffic_directions > 1:
                print(f"packets lost from {port_1} --> {port_0}: {lost_b} pkts")

    except STLError as ex_error:
        print(ex_error, file=sys.stderr)
        sys.exit(1)

    finally:
        if async_start:
            if client:
                client.disconnect(stop_traffic=False, release_ports=True)
        else:
            if client:
                client.disconnect()
            print(
                f"rate={rate!r}, totalReceived={total_rcvd}, "
                f"totalSent={total_sent}, frameLoss={lost_a + lost_b}, "
                f"targetDuration={duration!r}, "
                f"approximatedDuration={approximated_duration!r}, "
                f"approximatedRate={approximated_rate}, "
                f"latencyStream0(usec)={lat_a}, latencyStream1(usec)={lat_b}, "
            )
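A note on consuming the output above: the last print in the finally block emits
a single machine-readable summary line (rate, totalReceived, totalSent,
frameLoss, ...). Below is a minimal sketch of a caller that captures the
script's stdout and recovers the counters; the helper name and regex are
assumptions, not part of the source:

import re

# Hypothetical helper: pull the counters out of the summary line printed by the
# traffic script, e.g. "... totalReceived=100, totalSent=100, frameLoss=0, ..."
SUMMARY_RE = re.compile(r"totalReceived=(\d+), totalSent=(\d+), frameLoss=(-?\d+)")

def parse_summary(stdout):
    """Return (received, sent, lost) parsed from the script's summary line."""
    match = SUMMARY_RE.search(stdout)
    if match is None:
        raise ValueError("no traffic summary found in output")
    received, sent, lost = (int(group) for group in match.groups())
    return received, sent, lost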
Example #46
0
    def request(self, method, url, hooks=None, *args, **kwargs):
        """Request URL.

        This extends the FuturesSession request method to calculate a response
        time metric to each request.

        It is taken (almost) directly from the requests-futures documentation:
        https://github.com/ross/requests-futures#working-in-the-background

        Keyword Arguments:
        self                   -- This object.
        method                 -- String containing method desired for request.
        url                    -- String containing URL for request.
        hooks                  -- Dictionary containing hooks to execute after
                                  request finishes.
        args                   -- Arguments.
        kwargs                 -- Keyword arguments.

        Return Value:
        Request object.
        """
        # Record the start time for the request.
        if hooks is None:
            hooks = {}
        start = monotonic()

        def response_time(resp, *args, **kwargs):
            """Response Time Hook.

            Keyword Arguments:
            resp                   -- Response object.
            args                   -- Arguments.
            kwargs                 -- Keyword arguments.

            Return Value:
            Nothing.
            """
            resp.elapsed = monotonic() - start

            return

        # Install hook to execute when response completes.
        # Make sure that the time measurement hook is first, so we will not
        # track any later hook's execution time.
        try:
            if isinstance(hooks["response"], list):
                hooks["response"].insert(0, response_time)
            elif isinstance(hooks["response"], tuple):
                # Convert tuple to list and insert time measurement hook first.
                hooks["response"] = list(hooks["response"])
                hooks["response"].insert(0, response_time)
            else:
                # Must have previously contained a single hook function,
                # so convert to list.
                hooks["response"] = [response_time, hooks["response"]]
        except KeyError:
            # No response hook was already defined, so install it ourselves.
            hooks["response"] = [response_time]

        return super(SherlockFuturesSession, self).request(method,
                                                           url,
                                                           hooks=hooks,
                                                           *args, **kwargs)
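The same hook-first technique also works with a plain requests call; a usage
sketch (assumed, not taken from the project above), where the timing hook is
installed ahead of any user hooks so later hooks cannot inflate the measurement:

import requests
from time import monotonic

def timed_get(url, **kwargs):
    """Issue a GET request and attach a monotonic elapsed time to the response."""
    start = monotonic()

    def response_time(resp, *args, **kwargs):
        # Runs as the first response hook; elapsed time immune to clock changes.
        resp.elapsed_monotonic = monotonic() - start

    hooks = kwargs.pop("hooks", {})
    existing = hooks.get("response", [])
    if callable(existing):
        existing = [existing]
    hooks["response"] = [response_time, *list(existing)]
    return requests.get(url, hooks=hooks, **kwargs)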
Example #47
0
 def start(self, action_name: str) -> None:
     if action_name in self.current_actions:
         raise ValueError(
             f"Attempted to start {action_name} which has already started.")
     self.current_actions[action_name] = time.monotonic()
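A matching stop method is not shown; a hypothetical counterpart (names assumed,
same indentation as the snippet) would pop the stored timestamp and return the
elapsed interval:

 def stop(self, action_name: str) -> float:
     # Hypothetical counterpart to start(): seconds elapsed since the matching
     # start() call, based on time.monotonic() so clock changes cannot skew it.
     if action_name not in self.current_actions:
         raise ValueError(
             f"Attempted to stop {action_name} which was never started.")
     start_time = self.current_actions.pop(action_name)
     return time.monotonic() - start_time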
Example #48
0
# Formatting for the launch time text
magtag.add_text(text_font="Lato-Regular-14.bdf",
                text_position=(10, 58),
                text_scale=1,
                text_transform=time_transform)

# Formatting for the details text
magtag.add_text(text_font="Lato-Regular-10.bdf",
                text_position=(10, 94),
                text_scale=1,
                line_spacing=0.8,
                text_wrap=47,
                text_transform=details_transform)

timestamp = None

# Loop forever, checking the elapsed time and updating the screen after a set interval.
# When power-saving code becomes available, it can be used here to extend battery life.
while True:
    if not timestamp or (time.monotonic() -
                         timestamp) > 300:  # once every 5 minutes...
        try:
            # This statement gets the JSON data and displays it automagically
            value = magtag.fetch()
            print("Response is", value)
        except (ValueError, RuntimeError) as e:
            print("Some error occured, retrying! -", e)
    timestamp = time.monotonic()
# END
Example #49
0
    def _on_timer(self):
        try:
            measurement = collections.OrderedDict(
                time=time.monotonic() - self._t0)

            # Varying of variables
            condition_changed = False

            if self._measurements_for_condition >= self._measurements_per_condition:
                self._measurements_for_condition = 0

                try:
                    self._variable_values = next(self._variable_values_iter)
                except StopIteration:
                    self.stop()
                    return

                condition_changed = True

            for variable, value in zip(self._variables, self._variable_values):
                measurement[variable] = value

            if condition_changed:
                self.apply_condition(measurement)

            result = self.measure(measurement)

            if result or result is None:
                self._measurements_for_condition += 1

            columns, data = zip(*measurement.items())

            if self._data is None:
                dtype = np.dtype([(name, self.promote_datatype(type(d)))
                                  for name, d in zip(columns, data)])

                self._data = np.empty((2, ), dtype)
                self._len = 0
            else:
                if columns != self._data.dtype.names:
                    raise RuntimeError('Measure returned different columns '
                                       '(or different order) than first call')

            buffer_size = self._data.shape[0]
            if self._len >= buffer_size:
                # Double the buffer size
                tmp = self._data
                self._data = np.empty((buffer_size * 2, ), self._data.dtype)
                self._data[:buffer_size] = tmp

            idx = self._len
            self._data[idx] = data
            self._len += 1

            if self._first:
                self.log_start(self._data[idx])

            self.log(self._data[idx])

            self.plot(self._data[:self._len])

            self._first = False

        except:
            self._timer.stop()
            raise
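The buffer handling above is the usual amortized-doubling pattern for growing a
NumPy structured array; a stripped-down, illustrative sketch of just that part
(function and dtype are assumptions, not the original class):

import numpy as np

def append_row(data, length, row):
    # Grow-by-doubling append: allocate twice the space whenever the buffer is
    # full, so repeated appends stay amortized O(1).
    if data is None:
        data = np.empty((2,), dtype=[("time", "f8"), ("value", "f8")])
        length = 0
    if length >= data.shape[0]:
        grown = np.empty((data.shape[0] * 2,), data.dtype)
        grown[:data.shape[0]] = data
        data = grown
    data[length] = row
    return data, length + 1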
Example #50
0
    response.close()
else:
    json_data = json.loads(FAKE_DATA)

# update the labels to display values
elapsed_time_val.text = json_data["launchElapsedTime"]
distance_from_earth_val.text = "{}km".format(json_data["distanceEarthKm"])
distance_to_l2_val.text = "{}km".format(str(json_data["distanceL2Km"]))
percent_complete_val.text = "{}%".format(str(json_data["percentageCompleted"]))
speed_val.text = "{}km/s".format(str(json_data["speedKmS"]))
timestamp_val.text = str(json_data["timestamp"])
temperature_val.text = "{}c | {}c\n{}c | {}c".format(
    json_data["tempC"]["tempWarmSide1C"],
    json_data["tempC"]["tempCoolSide1C"],
    json_data["tempC"]["tempWarmSide2C"],
    json_data["tempC"]["tempCoolSide2C"],
)

# show the group
display.show(main_group)

# refresh display
try_refresh()

# Create an alarm that will trigger to wake us up
time_alarm = alarm.time.TimeAlarm(monotonic_time=time.monotonic() + SLEEP_TIME)

# Exit the program, and then deep sleep until the alarm wakes us.
alarm.exit_and_deep_sleep_until_alarms(time_alarm)
# Does not return, so we never get here.
Example #51
0
    def __init__(self,
                 plugin,
                 id_,
                 history_filename,
                 config_options,
                 additional_options,
                 interpreter_versions,
                 connection_file=None,
                 hostname=None,
                 menu_actions=None,
                 slave=False,
                 external_kernel=False,
                 given_name=None,
                 options_button=None,
                 show_elapsed_time=False,
                 reset_warning=True,
                 ask_before_restart=True,
                 css_path=None):
        super(ClientWidget, self).__init__(plugin)
        SaveHistoryMixin.__init__(self, history_filename)

        # --- Init attrs
        self.plugin = plugin
        self.id_ = id_
        self.connection_file = connection_file
        self.hostname = hostname
        self.menu_actions = menu_actions
        self.slave = slave
        self.external_kernel = external_kernel
        self.given_name = given_name
        self.show_elapsed_time = show_elapsed_time
        self.reset_warning = reset_warning
        self.ask_before_restart = ask_before_restart

        # --- Other attrs
        self.options_button = options_button
        self.stop_button = None
        self.reset_button = None
        self.stop_icon = ima.icon('stop')
        self.history = []
        self.allow_rename = True
        self.stderr_dir = None
        self.is_error_shown = False
        self.restart_thread = None

        if css_path is None:
            self.css_path = CSS_PATH
        else:
            self.css_path = css_path

        # --- Widgets
        self.shellwidget = ShellWidget(
            config=config_options,
            ipyclient=self,
            additional_options=additional_options,
            interpreter_versions=interpreter_versions,
            external_kernel=external_kernel,
            local_kernel=True)

        self.infowidget = plugin.infowidget
        self.blank_page = self._create_blank_page()
        self.loading_page = self._create_loading_page()
        # To keep a reference to the page to be displayed
        # in infowidget
        self.info_page = None
        self._before_prompt_is_ready()

        # Elapsed time
        self.time_label = None
        self.t0 = time.monotonic()
        self.timer = QTimer(self)
        self.show_time_action = create_action(
            self,
            _("Show elapsed time"),
            toggled=self.set_elapsed_time_visible)

        # --- Layout
        self.layout = QVBoxLayout()
        toolbar_buttons = self.get_toolbar_buttons()

        hlayout = QHBoxLayout()
        hlayout.addWidget(self.create_time_label())
        hlayout.addStretch(0)
        for button in toolbar_buttons:
            hlayout.addWidget(button)

        self.layout.addLayout(hlayout)
        self.layout.setContentsMargins(0, 0, 0, 0)
        self.layout.addWidget(self.shellwidget)
        self.layout.addWidget(self.infowidget)
        self.setLayout(self.layout)

        # --- Exit function
        self.exit_callback = lambda: plugin.close_client(client=self)

        # --- Dialog manager
        self.dialog_manager = DialogManager()

        # Show timer
        self.update_time_label_visibility()

        # Poll for stderr changes
        self.stderr_mtime = 0
        self.stderr_timer = QTimer(self)
        self.stderr_timer.timeout.connect(self.poll_stderr_file_change)
        self.stderr_timer.setInterval(1000)
        self.stderr_timer.start()
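The widget above only stores t0 and a QTimer for the elapsed-time feature; a
hypothetical slot that the timer could drive to refresh the label (names and
formatting assumed, not Spyder's actual implementation) might look like:

    def _update_time_label(self):
        # Hypothetical slot: format time since construction as HH:MM:SS and
        # push it into the elapsed-time label, if one has been created.
        elapsed = int(time.monotonic() - self.t0)
        hours, remainder = divmod(elapsed, 3600)
        minutes, seconds = divmod(remainder, 60)
        if self.time_label is not None:
            self.time_label.setText(f"{hours:02d}:{minutes:02d}:{seconds:02d}")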
Example #52
0
 def __init__(self, bot):
     self.bot = bot
     self.start_time = time.monotonic()
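Because the start time is monotonic, an uptime read-out is a simple difference;
a hypothetical companion property (not part of the snippet):

 @property
 def uptime_seconds(self) -> float:
     # Seconds since this object was constructed; immune to wall-clock changes.
     return time.monotonic() - self.start_time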
Example #53
0
 def enter(self, machine):
     print("Start heating")
     # For testing, divide the cook time by 10 (we are impatient!)
     machine.stop_time = time.monotonic() + machine.cook_time
Example #54
0
def as_completed(fs, timeout=None):
    """An iterator over the given futures that yields each as it completes.

    Args:
        fs: The sequence of Futures (possibly created by different Executors) to
            iterate over.
        timeout: The maximum number of seconds to wait. If None, then there
            is no limit on the wait time.

    Returns:
        An iterator that yields the given Futures as they complete (finished or
        cancelled). If any given Futures are duplicated, they will be returned
        once.

    Raises:
        TimeoutError: If the entire result iterator could not be generated
            before the given timeout.
    """
    if timeout is not None:
        end_time = timeout + time.monotonic()

    fs = set(fs)
    total_futures = len(fs)
    with _AcquireFutures(fs):
        finished = set(f for f in fs
                       if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
        pending = fs - finished
        waiter = _create_and_install_waiters(fs, _AS_COMPLETED)
    finished = list(finished)
    try:
        yield from _yield_finished_futures(finished,
                                           waiter,
                                           ref_collect=(fs, ))

        while pending:
            if timeout is None:
                wait_timeout = None
            else:
                wait_timeout = end_time - time.monotonic()
                if wait_timeout < 0:
                    raise TimeoutError('%d (of %d) futures unfinished' %
                                       (len(pending), total_futures))

            waiter.event.wait(wait_timeout)

            with waiter.lock:
                finished = waiter.finished_futures
                waiter.finished_futures = []
                waiter.event.clear()

            # reverse to keep finishing order
            finished.reverse()
            yield from _yield_finished_futures(finished,
                                               waiter,
                                               ref_collect=(fs, pending))

    finally:
        # Remove waiter from unfinished futures
        for f in fs:
            with f._condition:
                f._waiters.remove(waiter)
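A typical way to drive this iterator (a usage sketch, not from the source):
submit work to an executor and handle each future as soon as it finishes, with
the timeout bounding the whole loop rather than any single future.

from concurrent.futures import ThreadPoolExecutor, as_completed

def fetch_all(urls, fetch, timeout=30.0):
    # Collect results in completion order; raises TimeoutError if the full set
    # does not finish within `timeout` seconds.
    results = {}
    with ThreadPoolExecutor(max_workers=4) as pool:
        future_to_url = {pool.submit(fetch, url): url for url in urls}
        for future in as_completed(future_to_url, timeout=timeout):
            results[future_to_url[future]] = future.result()
    return results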
Example #55
0
        default=False,
        help=
        'Section can be provided together with -t to print a specific page '
        'or with --alltitles to print all titles in a specific section')
    arg_parser.add_argument(
        '--find',
        '-f',
        default=False,
        help='Finds all titles from all sections by keyword in page content')
    arg_parser.add_argument(
        "--alltitles",
        action="store_true",
        help='Show all titles in a specific section (use with -s)')
    arg_parser.add_argument("--allsections",
                            action="store_true",
                            help='Show all sections')

    return arg_parser.parse_args()


if __name__ == '__main__':

    args = parse_arguments()

    if args.user:
        started_at = time.monotonic()
        onenote = OneNoteDownload(args.user)
        onenote.download()
    else:
        OneNoteOffline().display_notes(args)
Example #56
0
    async def _slurp_changelogs(self) -> None:
        changelog_queue = self.tables.changelog_queue
        tp_to_table = self.tp_to_table

        active_tps = self.active_tps
        standby_tps = self.standby_tps
        active_offsets = self.active_offsets
        standby_offsets = self.standby_offsets
        active_events_received_at = self._active_events_received_at
        standby_events_received_at = self._standby_events_received_at

        buffers = self.buffers
        buffer_sizes = self.buffer_sizes
        processing_times = self._processing_times

        def _maybe_signal_recovery_end() -> None:
            if self.in_recovery and not self.active_remaining_total():
                # apply anything stuck in the buffers
                self.flush_buffers()
                self._set_recovery_ended()
                if self._actives_span is not None:
                    self._actives_span.set_tag('Actives-Ready', True)
                self.signal_recovery_end.set()

        while not self.should_stop:
            try:
                event: EventT = await asyncio.wait_for(changelog_queue.get(),
                                                       timeout=5.0)
            except asyncio.TimeoutError:
                if self.should_stop:
                    return
                _maybe_signal_recovery_end()
                continue

            now = monotonic()
            message = event.message
            tp = message.tp
            offset = message.offset

            offsets: Counter[TP]
            bufsize = buffer_sizes.get(tp)
            is_active = False
            if tp in active_tps:
                is_active = True
                table = tp_to_table[tp]
                offsets = active_offsets
                if bufsize is None:
                    bufsize = buffer_sizes[tp] = table.recovery_buffer_size
                active_events_received_at[tp] = now
            elif tp in standby_tps:
                table = tp_to_table[tp]
                offsets = standby_offsets
                if bufsize is None:
                    bufsize = buffer_sizes[tp] = table.standby_buffer_size
                    standby_events_received_at[tp] = now
            else:
                continue

            seen_offset = offsets.get(tp, None)
            if seen_offset is None or offset > seen_offset:
                offsets[tp] = offset
                buf = buffers[table]
                buf.append(event)
                await table.on_changelog_event(event)
                if len(buf) >= bufsize:
                    table.apply_changelog_batch(buf)
                    buf.clear()
                    self._last_flush_at = now
                now_after = monotonic()

                if is_active:
                    last_processed_at = self._last_active_event_processed_at
                    if last_processed_at is not None:
                        processing_times.append(now_after - last_processed_at)
                        max_samples = self.num_samples_required_for_estimate
                        if len(processing_times) > max_samples:
                            processing_times.popleft()
                    self._last_active_event_processed_at = now_after

            _maybe_signal_recovery_end()

            if self.standbys_pending and not self.standby_remaining_total():
                if self._standbys_span:
                    finish_span(self._standbys_span)
                    self._standbys_span = None
                self.tables.on_standbys_ready()
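The consumption pattern in the loop above, pulling from an asyncio queue with a
bounded wait so the task still notices shutdown and recovery-end conditions
while idle, reduces to the following sketch (illustrative only, names assumed):

import asyncio

async def drain(queue, handle, should_stop):
    # Wake up at least every 5 seconds even when the queue is empty, so the
    # surrounding service can check stop flags and end-of-recovery conditions.
    while not should_stop():
        try:
            item = await asyncio.wait_for(queue.get(), timeout=5.0)
        except asyncio.TimeoutError:
            continue
        handle(item)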
Example #57
0
    def __init__(self, **kwargs):
        """Constructor for the PyArlo object."""
        # core values
        self._last_error = None

        # Set up the config first.
        self._cfg = ArloCfg(self, **kwargs)

        # Create storage/scratch directory.
        if self._cfg.state_file is not None or self._cfg.dump_file is not None:
            try:
                os.mkdir(self._cfg.storage_dir)
            except Exception:
                pass

        # Create remaining components.
        self._bg = ArloBackground(self)
        self._st = ArloStorage(self)
        self._be = ArloBackEnd(self)
        self._ml = ArloMediaLibrary(self)

        # If login failed, stop now!
        if not self._be.is_connected:
            return

        self._lock = threading.Condition()
        self._bases = []
        self._cameras = []
        self._lights = []
        self._doorbells = []

        # On day flip we do extra work, record today.
        self._today = datetime.date.today()

        # Every few hours we can refresh the device list.
        self._refresh_devices_at = (
            time.monotonic() + self._cfg.refresh_devices_every
        )

        # Every few minutes we can refresh the mode list.
        self._refresh_modes_at = (
            time.monotonic() + self._cfg.refresh_modes_every
        )

        # default blank image when waiting for camera image to appear
        self._blank_image = base64.standard_b64decode(BLANK_IMAGE)

        # Slow piece.
        # Get devices, fill local db, and create device instance.
        self.info("pyaarlo starting")
        self._started = False
        self._refresh_devices()

        for device in self._devices:
            dname = device.get("deviceName")
            dtype = device.get("deviceType")
            if device.get("state", "unknown") != "provisioned":
                self.info("skipping " + dname + ": state unknown")
                continue

            # This needs its own code now... Does no parent indicate a base station?
            if (dtype == "basestation" or device.get("modelId") == "ABC1000"
                    or dtype == "arloq" or dtype == "arloqs"):
                self._bases.append(ArloBase(dname, self, device))
            # Newer devices can connect directly to wifi and act as their own
            # base station; they can also be assigned to a real base station.
            if (device.get("modelId").startswith(MODEL_WIRED_VIDEO_DOORBELL)
                    or device.get("modelId").startswith(MODEL_PRO_3_FLOODLIGHT)
                    or device.get("modelId").startswith(MODEL_PRO_4)
                    or device.get("modelId").startswith(MODEL_ESSENTIAL)
                    or device.get("modelId").startswith(MODEL_ESSENTIAL_INDOOR)
                    or device.get("modelId").startswith(
                        MODEL_WIREFREE_VIDEO_DOORBELL)):
                parent_id = device.get("parentId", None)
                if parent_id is None or parent_id == device.get(
                        "deviceId", None):
                    self._bases.append(ArloBase(dname, self, device))
            if dtype == "arlobridge":
                self._bases.append(ArloBase(dname, self, device))
            if (dtype == "camera" or dtype == "arloq" or dtype == "arloqs"
                    or device.get("modelId").startswith(
                        MODEL_WIRED_VIDEO_DOORBELL)
                    or device.get("modelId").startswith(
                        MODEL_WIREFREE_VIDEO_DOORBELL)):
                self._cameras.append(ArloCamera(dname, self, device))
            if dtype == "doorbell":
                self._doorbells.append(ArloDoorBell(dname, self, device))
            if dtype == "lights":
                self._lights.append(ArloLight(dname, self, device))

        # Save out unchanging stats!
        self._st.set(["ARLO", TOTAL_CAMERAS_KEY], len(self._cameras))
        self._st.set(["ARLO", TOTAL_BELLS_KEY], len(self._doorbells))
        self._st.set(["ARLO", TOTAL_LIGHTS_KEY], len(self._lights))

        # Always ping bases first!
        self._ping_bases()

        # Initial config and state retrieval.
        if self._cfg.synchronous_mode:
            # Synchronous; run them one after the other
            self.debug("getting initial settings")
            self._refresh_bases(initial=True)
            self._refresh_modes()
            self._refresh_ambient_sensors()
            self._refresh_doorbells()
            self._ml.load()
            self._refresh_camera_thumbnails(True)
            self._refresh_camera_media(True)
            self._initial_refresh_done()
        else:
            # Asynchronous; queue them to run one after the other
            self.debug("queueing initial settings")
            self._bg.run(self._refresh_bases, initial=True)
            self._bg.run(self._refresh_modes)
            self._bg.run(self._refresh_ambient_sensors)
            self._bg.run(self._refresh_doorbells)
            self._bg.run(self._ml.load)
            self._bg.run(self._refresh_camera_thumbnails, wait=False)
            self._bg.run(self._refresh_camera_media, wait=False)
            self._bg.run(self._initial_refresh_done)

        # Register house keeping cron jobs.
        self.debug("registering cron jobs")
        self._bg.run_every(self._fast_refresh, FAST_REFRESH_INTERVAL)
        self._bg.run_every(self._slow_refresh, SLOW_REFRESH_INTERVAL)

        # Wait for initial refresh
        if self._cfg.wait_for_initial_setup:
            with self._lock:
                while not self._started:
                    self.debug("waiting for initial setup...")
                    self._lock.wait(1)
            self.debug("setup finished...")
Example #58
0
    async def _pinger(self):
        in_flight = []
        next_ping_at = None
        self._logger.debug("%s: pinger booted up", self.ping_address)
        try:
            while True:
                self._logger.debug("%s: pinger loop. interval=%r",
                                   self.ping_address,
                                   self.ping_interval)
                now = time.monotonic()

                ping_interval = self.ping_interval.total_seconds()
                if next_ping_at is None:
                    next_ping_at = now - 1

                timeout = next_ping_at - now

                if timeout <= 0:
                    # do not send pings while the client is in suspended state
                    # (= Stream Management hibernation). This will only add to
                    # the queue for no good reason; we won’t get any reply soon
                    # anyway.
                    if self._client.suspended:
                        self._logger.debug(
                            "%s: omitting self-ping, as the stream is "
                            "currently hibernated",
                            self.ping_address,
                        )
                    else:
                        self._logger.debug(
                            "%s: sending self-ping with timeout %r",
                            self.ping_address,
                            self.ping_timeout,
                        )
                        in_flight.append(asyncio.ensure_future(
                            asyncio.wait_for(
                                aioxmpp.ping.ping(self._client,
                                                  self.ping_address),
                                self.ping_timeout.total_seconds()
                            )
                        ))
                    next_ping_at = now + _apply_jitter(ping_interval, 0.1)
                    timeout = ping_interval

                assert timeout > 0

                if not in_flight:
                    self._logger.debug(
                        "%s: pinger has nothing to do, sleeping for %s",
                        self.ping_address,
                        timeout,
                    )
                    await asyncio.sleep(timeout)
                    continue

                self._logger.debug(
                    "%s: pinger waiting for %d pings for at most %ss",
                    self.ping_address,
                    len(in_flight),
                    timeout,
                )
                done, pending = await asyncio.wait(
                    in_flight,
                    timeout=timeout,
                    return_when=asyncio.FIRST_COMPLETED,
                )

                for fut in done:
                    self._interpret_result(fut)

                in_flight = list(pending)
        finally:
            self._logger.debug("%s: pinger exited", self.ping_address,
                               exc_info=True)
            for fut in in_flight:
                if not fut.done():
                    fut.cancel()
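The _apply_jitter helper referenced above is not shown in this snippet; a
plausible implementation (an assumption, not the library's actual code) spreads
the next ping time by a small random factor so many clients do not all ping at
once:

import random

def _apply_jitter(value, amount):
    # Assumed behaviour: scale value by a random factor in [1 - amount, 1 + amount].
    return value * (1.0 + random.uniform(-amount, amount))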
Example #59
0
    QUIET = 0
    INFO = enum.auto()
    COMMAND = enum.auto()
    STDOUT = enum.auto()
    CHANNEL = enum.auto()

    def __str__(self) -> str:
        return super(Verbosity, self).__str__().split(".")[-1]


NESTING = 0
INTERACTIVE = False
VERBOSITY = Verbosity.COMMAND
LOGFILE: typing.Optional[typing.TextIO] = None
START_TIME = time.monotonic()


class EventIO(io.StringIO):
    """Stream for a log event."""
    def __init__(
        self,
        ty: typing.List[str],
        initial: typing.Union[str, _TC, None] = None,
        *,
        verbosity: Verbosity = Verbosity.INFO,
        nest_first: typing.Optional[str] = None,
        **kwargs: typing.Any,
    ) -> None:
        """
        Create a log event.
Example #60
-7
    def test_stress_delivery_simultaneous(self):
        """
        This test uses simultaneous signal handlers.
        """
        N = self.decide_itimer_count()
        sigs = []

        def handler(signum, frame):
            sigs.append(signum)

        self.setsig(signal.SIGUSR1, handler)
        self.setsig(signal.SIGALRM, handler)  # for ITIMER_REAL

        expected_sigs = 0
        deadline = time.monotonic() + 15.0

        while expected_sigs < N:
            # Hopefully the SIGALRM will be received somewhere during
            # initial processing of SIGUSR1.
            signal.setitimer(signal.ITIMER_REAL, 1e-6 + random.random() * 1e-5)
            os.kill(os.getpid(), signal.SIGUSR1)

            expected_sigs += 2
            # Wait for handlers to run to avoid signal coalescing
            while len(sigs) < expected_sigs and time.monotonic() < deadline:
                time.sleep(1e-5)

        # All ITIMER_REAL signals should have been delivered to the
        # Python handler
        self.assertEqual(len(sigs), N, "Some signals were lost")