def run_server(command, response, port, report):
    """Run the frame drawing service.

    The control protocol for the server's command queue is as follows:
        (command, payload)
    Examples are
        (FRAME, (frame_number, frame_time, mixer, clocks)) -> data payload to draw a frame
        (QUIT, _) -> quit the server thread

    The server communicates with the control thread over the response queue.
    It requests a frame with (FRAME_REQ, _) and reports a fatal, thread-death
    error with (FATAL_ERROR, err).
    """
    try:
        socket = create_pub_socket(port)
        # we're ready to render
        response.put((RUNNING, None))
        log_time = monotonic()
        while True:
            # ready to draw a frame
            response.put((FRAME_REQ, None))
            # wait for a reply
            action, payload = command.get()
            # check if it is time to quit
            if action == QUIT:
                return
            # no other valid commands besides FRAME
            elif action != FRAME:
                # blow up with a fatal error
                # we could try again, but who knows how we even got here
                raise RenderServerError("Unrecognized command: {}".format(action))

            frame_number, frame_time, mixer, clocks = payload

            # render the payload we received
            video_outs = mixer.draw_layers(clocks)

            for video_chan, draw_commands in enumerate(video_outs):
                serialized = msgpack.dumps(
                    (frame_number, frame_time, draw_commands),
                    use_single_float=True)
                socket.send_multipart((str(video_chan), serialized))

            if report:
                now = monotonic()
                log.debug("Framerate: {}".format(1 / (now - log_time)))
                log_time = now
    except Exception as err:
        # some exception we didn't catch
        _, _, tb = sys.exc_info()
        response.put((FATAL_ERROR, (err, traceback.format_tb(tb))))
        return
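# Not part of the original module: a minimal sketch of the control-thread
# side of the protocol documented above. It reuses the FRAME/QUIT/RUNNING/
# FRAME_REQ/FATAL_ERROR constants from run_server; `mixer` and `clocks` are
# hypothetical stand-ins for the real renderer state.
def control_loop(mixer, clocks, port, n_frames):
    from queue import Queue
    from threading import Thread

    command, response = Queue(), Queue()
    server = Thread(target=run_server, args=(command, response, port, True))
    server.start()
    action, _ = response.get()
    assert action == RUNNING
    for frame_number in range(n_frames):
        action, payload = response.get()
        if action == FATAL_ERROR:
            err, tb = payload
            raise RenderServerError("server died: {}\n{}".format(err, "".join(tb)))
        # action == FRAME_REQ: the server is ready for another frame
        command.put((FRAME, (frame_number, monotonic(), mixer, clocks)))
    command.put((QUIT, None))
    server.join()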
def poll(self, timeout=None): """Polls for events while handling timers. poll() will wait up to timeout seconds for sockets or files registered with self.reactor to become ready. A timeout of None will cause poll to wait an infinite amount of time. While waiting for poll events, scheduled events will be handled, potentially causing the wait time to slip a bit. """ elapsed = 0.0 mono_time = clock.monotonic() while True: wall_time = time_mod.time() self._mono.execute(mono_time) self._wall.execute(wall_time) delays = [ self.LOOP_INTERVAL if timeout is None else min(timeout - elapsed, self.LOOP_INTERVAL), self._mono.delay(mono_time), self._wall.delay(wall_time), ] delay = min(d for d in delays if d is not None) events = self.reactor.poll(delay) if events: return events last_time, mono_time = mono_time, clock.monotonic() elapsed += mono_time - last_time if timeout is not None and elapsed >= timeout: return []
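# Not part of the original class: a sketch of how poll() above is typically
# driven. It assumes the surrounding loop object also exposes the timer
# methods shown later in this file (periodic_timer) and that reactor.poll()
# yields (fd, event) pairs; `send_heartbeat` and `handle` are hypothetical.
def event_loop(loop):
    loop.periodic_timer(30.0, send_heartbeat)  # rearmed automatically
    while True:
        for fd, event in loop.poll(timeout=1.0):
            handle(fd, event)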
def send_sms(self, to, content, reference, sender=None):
    data = {
        "apiKey": self.api_key,
        "from": self.from_number if sender is None else sender,
        "to": to.replace('+', ''),
        "message": content,
        "reference": reference
    }

    start_time = monotonic()
    try:
        response = request(
            "POST",
            self.url,
            data=data
        )
        response.raise_for_status()
        try:
            # a JSON body with code == 0 indicates success; anything else
            # (including an unparseable body) is treated as a failure
            if response.json()['code'] != 0:
                raise ValueError()
        except (ValueError, AttributeError) as e:
            self.record_outcome(False, response)
            raise FiretextClientResponseException(response=response, exception=e)
        self.record_outcome(True, response)
    except RequestException as e:
        self.record_outcome(False, e.response)
        raise FiretextClientResponseException(response=e.response, exception=e)
    finally:
        elapsed_time = monotonic() - start_time
        self.current_app.logger.info("Firetext request finished in {}".format(elapsed_time))
        self.statsd_client.timing("clients.firetext.request-time", elapsed_time)
    return response
def token(self): access_token, expire_time = getattr(self, "bing_cached_access_token", None), \ getattr(self, "bing_cached_access_token_expiry", None) if expire_time is None or monotonic() > expire_time: # first credential request, or the access token from the previous one expired # get an access token using OAuth credential_url = "https://oxford-speech.cloudapp.net/token/issueToken" credential_request = Request(credential_url, data=urlencode({ "grant_type": "client_credentials", "client_id": "python", "client_secret": self.key, "scope": "https://speech.platform.bing.com" }).encode("utf-8")) start_time = monotonic() try: credential_response = urlopen(credential_request) except HTTPError as e: raise RequestError("recognition request failed: {0}".format( getattr(e, "reason", "status {0}".format(e.code)))) # use getattr to be compatible with Python 2.6 except URLError as e: raise RequestError("recognition connection failed: {0}".format(e.reason)) credential_text = credential_response.read().decode("utf-8") credentials = json.loads(credential_text) access_token, expiry_seconds = credentials["access_token"], float(credentials["expires_in"]) # save the token for the duration it is valid for self.bing_cached_access_token = access_token self.bing_cached_access_token_expiry = start_time + expiry_seconds return access_token
def test_sensor_watch_queue_gets_deleted_on_stop(self):
    def create_handler(sensor_db):
        pass

    def update_handler(sensor_db):
        pass

    def delete_handler(sensor_db):
        pass

    sensor_watcher = SensorWatcher(create_handler, update_handler, delete_handler,
                                   queue_suffix='covfefe')
    sensor_watcher.start()

    # wait up to 5 seconds for the watch queue to appear
    start = monotonic()
    done = False
    while not done:
        eventlet.sleep(0.01)
        sw_queues = self._get_sensor_watcher_amqp_queues(queue_name='st2.sensor.watch.covfefe')
        done = len(sw_queues) > 0 or ((monotonic() - start) > 5)

    sensor_watcher.stop()
    sw_queues = self._get_sensor_watcher_amqp_queues(queue_name='st2.sensor.watch.covfefe')
    self.assertTrue(len(sw_queues) == 0)
def handle(self, comment, *args, **options): items = {} created_count = 0 from django.conf import settings translation.activate(settings.LANGUAGE_CODE) for key, import_path in STAT_METRICS.items(): f = import_string(import_path) name = getattr(f, 'name', key) description = getattr(f, 'description', import_path) item, created = Item.objects.get_or_create(key=key, defaults={'name': name, 'description': description}) if created: created_count += 1 items[key] = item self.stdout.write("Registered {} new items.".format(created_count)) values = [] start = monotonic() time = now() for key, import_path in STAT_METRICS.items(): f = import_string(import_path) values.append(Value(item=items[key], time=time, value=f())) end = int(monotonic() - start) desc = _("Time (seconds) in which metric statistical information was collected.") system_item, created = Item.objects.get_or_create(key='stats.collect_time', defaults={'name': _("Time to calculate statistics"), 'description': desc}) values.append(Value(item=system_item, time=time, value=end)) with transaction.atomic(): Value.objects.bulk_create(values) Item.objects.filter(pk__in=[value.item.pk for value in values]).update(last_updated=now()) self.stdout.write("Registered {} values.".format(len(values))) translation.deactivate()
def update_latency(self, latency): self.smoothed_latency = (7 * self.smoothed_latency + latency) / 8 self.smoothed_variability = (7 * self.smoothed_variability + abs(latency - self.smoothed_latency)) / 8 self.max_latency = self.smoothed_latency + (self.NUM_DEV * self.smoothed_variability) self.adjust_count -= 1 seconds_since_last_update = monotonic() - self.last_adjustment_time if (self.adjust_count <= 0) and (seconds_since_last_update >= self.SECONDS_BEFORE_ADJUSTMENT): # This algorithm is based on the Welsh and Culler "Adaptive Overload # Control for Busy Internet Servers" paper, although based on a smoothed # mean latency, rather than the 90th percentile as per the paper. # Also, the additive increase is scaled as a proportion of the maximum # bucket size, rather than an absolute number as per the paper. accepted_percent = 100 if (self.accepted + self.rejected) != 0: accepted_percent = 100 * (float(self.accepted) / float(self.accepted + self.rejected)) err = (self.smoothed_latency - self.target_latency) / self.target_latency hss_overloads = penaltycounter.get_hss_penalty_count() if ((err > self.DECREASE_THRESHOLD) or (hss_overloads > 0)): # latency is above where we want it to be, or we are getting overload responses from the HSS, # so adjust the rate downwards by a multiplicative factor new_rate = self.bucket.rate / self.DECREASE_FACTOR if new_rate < self.min_token_rate: new_rate = self.min_token_rate _log.info("Accepted %.2f%% of requests, latency error = %f, HSS overloads = %d, decrease rate %f to %f" % (accepted_percent, err, hss_overloads, self.bucket.rate, new_rate)) self.bucket.update_rate(new_rate) elif err < self.INCREASE_THRESHOLD: # latency is sufficiently below the target, so increasing the permitted # request rate would be sensible; but first check that we are using a # significant proportion of the current rate - if we're allowing 100 # requests/sec, and we get 1 request/sec during a quiet period, we will # handle that well, but it is not sufficient evidence that we can increase the rate. max_permitted_requests = self.bucket.rate * seconds_since_last_update # Arbitrary threshold of at least 50% of maximum permitted requests minimum_threshold = max_permitted_requests * 0.5 if (self.accepted > minimum_threshold): new_rate = self.bucket.rate + (-err) * self.bucket.max_size * self.INCREASE_FACTOR _log.info("Accepted %.2f%% of requests, latency error = %f, increase rate %f to %f" " based on %d accepted requests in last %.2f seconds" % (accepted_percent, err, self.bucket.rate, new_rate, self.accepted, seconds_since_last_update)) self.bucket.update_rate(new_rate) else: _log.info("Only handled %d requests in the last %.2f seconds, rate remains unchanged." " Minimum threshold for change is %f" % (self.accepted, seconds_since_last_update, minimum_threshold)) else: _log.info("Accepted %f%% of requests, latency error = %f, rate %f unchanged" % (accepted_percent, err, self.bucket.rate)) self.accepted = 0 self.rejected = 0 self.adjust_count = self.REQUESTS_BEFORE_ADJUSTMENT self.last_adjustment_time = monotonic() penaltycounter.reset_hss_penalty_count()
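# Worked example (not original code) of the smoothing at the top of
# update_latency: an exponentially weighted moving average with alpha = 1/8.
# NUM_DEV is assumed to be 4 here; the real value lives on the class.
smoothed_latency = 0.100      # current estimate: 100 ms
smoothed_variability = 0.010  # current deviation estimate: 10 ms
latency = 0.180               # new sample: 180 ms

smoothed_latency = (7 * smoothed_latency + latency) / 8           # -> 0.110
smoothed_variability = (7 * smoothed_variability +
                        abs(latency - smoothed_latency)) / 8      # -> 0.0175
max_latency = smoothed_latency + 4 * smoothed_variability         # -> 0.180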
def next(self): """Return the next batch of items to upload.""" queue = self.queue items = [] start_time = monotonic.monotonic() total_size = 0 while len(items) < self.upload_size: elapsed = monotonic.monotonic() - start_time if elapsed >= self.upload_interval: break try: item = queue.get(block=True, timeout=self.upload_interval - elapsed) item_size = len(json.dumps(item, cls=DatetimeSerializer).encode()) if item_size > MAX_MSG_SIZE: self.log.error('Item exceeds 32kb limit, dropping. (%s)', str(item)) continue items.append(item) total_size += item_size if total_size >= BATCH_SIZE_LIMIT: self.log.debug('hit batch size limit (size: %d)', total_size) break except Empty: break return items
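# Not part of the original class: a sketch of a consumer thread draining
# next() above. `consumer.running`, `upload_batch`, and the queue.task_done()
# bookkeeping are assumptions about the surrounding uploader.
def upload_loop(consumer):
    while consumer.running:
        batch = consumer.next()
        if not batch:
            continue
        upload_batch(batch)             # hypothetical network call
        for _ in batch:
            consumer.queue.task_done()  # lets queue.join() unblock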
def _sleep(self, sec, critical_section=False):
    until = monotonic() + sec
    while monotonic() < until:
        if not critical_section:
            self._interrupt_point()
        # sleep in short slices so interrupts are handled promptly,
        # without overshooting the deadline
        sleep(min(1, max(0, until - monotonic())))
def send_email(self, source, to_addresses, subject, body, html_body='', reply_to_address=None):
    try:
        if isinstance(to_addresses, str):
            to_addresses = [to_addresses]
        reply_to_addresses = [reply_to_address] if reply_to_address else []
        body = {
            'Text': {'Data': body}
        }
        if html_body:
            body.update({
                'Html': {'Data': html_body}
            })
        start_time = monotonic()
        response = self._client.send_email(
            Source=source,
            Destination={
                'ToAddresses': to_addresses,
                'CcAddresses': [],
                'BccAddresses': []
            },
            Message={
                'Subject': {
                    'Data': subject,
                },
                'Body': body
            },
            ReplyToAddresses=reply_to_addresses
        )
    except botocore.exceptions.ClientError as e:
        self.statsd_client.incr("clients.ses.error")
        # http://docs.aws.amazon.com/ses/latest/DeveloperGuide/api-error-codes.html
        if e.response['Error']['Code'] == 'InvalidParameterValue':
            raise InvalidEmailError('email: "{}" message: "{}"'.format(
                to_addresses[0],
                e.response['Error']['Message']
            ))
        else:
            raise AwsSesClientException(str(e))
    except Exception as e:
        self.statsd_client.incr("clients.ses.error")
        raise AwsSesClientException(str(e))
    else:
        elapsed_time = monotonic() - start_time
        current_app.logger.info("AWS SES request finished in {}".format(elapsed_time))
        self.statsd_client.timing("clients.ses.request-time", elapsed_time)
        self.statsd_client.incr("clients.ses.success")
        return response['MessageId']
def _request(self, method, url, data=None, params=None): if not self.enabled: return None url = url.lstrip('/') url = urlparse.urljoin(self.base_url, url) logger.debug("API request {method} {url}", extra={ 'method': method, 'url': url }) headers = { "Content-type": "application/json", "Authorization": "Bearer {}".format(self.auth_token), "User-agent": "DM-API-Client/{}".format(__version__), } headers = self._add_request_id_header(headers) headers = self._add_zipkin_tracing_headers(headers) start_time = monotonic() try: response = requests.request( method, url, headers=headers, json=data, params=params) response.raise_for_status() except requests.RequestException as e: api_error = HTTPError.create(e) elapsed_time = monotonic() - start_time logger.log( logging.INFO if api_error.status_code == 404 else logging.WARNING, "API {api_method} request on {api_url} failed with {api_status} '{api_error}'", extra={ 'api_method': method, 'api_url': url, 'api_status': api_error.status_code, 'api_error': api_error.message, 'api_time': elapsed_time }) raise api_error else: elapsed_time = monotonic() - start_time logger.info( "API {api_method} request on {api_url} finished in {api_time}", extra={ 'api_method': method, 'api_url': url, 'api_status': response.status_code, 'api_time': elapsed_time }) try: return response.json() except ValueError as e: raise InvalidResponse(response, message="No JSON object could be decoded")
def listen_and_reply(sock, compute_func):
    while True:
        req = protocol.from_encoded_message(sock.recv())
        start = monotonic()
        # TODO: try/except around compute_func
        ret = compute_func(req)
        end = monotonic()
        t = end - start
        # split the elapsed time into (whole seconds, remaining nanoseconds)
        diff = []
        diff.append(math.floor(t))
        diff.append((t - diff[0]) * math.pow(10, 9))
        sock.send(protocol.to_encoded_message(ret, diff))
def request(self, method, url, data=None, params=None): logger.debug("API request {} {}".format(method, url)) payload = json.dumps(data) api_token = create_jwt_token( self.api_key, self.service_id ) headers = { "Content-type": "application/json", "Authorization": "Bearer {}".format(api_token), "User-agent": "NOTIFY-API-PYTHON-CLIENT/{}".format(__version__), } url = urlparse.urljoin(self.base_url, url) start_time = monotonic() try: response = requests.request( method, url, headers=headers, data=payload, params=params ) response.raise_for_status() except requests.RequestException as e: api_error = HTTPError.create(e) logger.error( "API {} request on {} failed with {} '{}'".format( method, url, api_error.status_code, api_error.message ) ) raise api_error finally: elapsed_time = monotonic() - start_time logger.debug("API {} request on {} finished in {}".format(method, url, elapsed_time)) try: if response.status_code == 204: return return response.json() except ValueError: raise InvalidResponse( response, message="No JSON response object could be decoded" )
def ping(self):
    """Ping the server, returning the round-trip latency in milliseconds

    The A2A_PING request is deprecated so this actually sends an A2S_INFO
    request and times that. The time difference between the two should be
    negligible.
    """
    time_sent = monotonic.monotonic()
    self.request(messages.InfoRequest())
    messages.InfoResponse.decode(self.get_response())
    time_received = monotonic.monotonic()
    return (time_received - time_sent) * 1000.0
def _try_connect(self, pdpc_apn, pdpc_type, retry_timeout):
    retry = monotonic() + retry_timeout
    while True:
        self._interrupt_point()
        self._status = Manager.Status.connecting
        if not self._connect(pdpc_apn, pdpc_type):
            self._status = Manager.Status.connect_failure
            if monotonic() >= retry:
                break
            self._sleep(10)
        else:
            return True
    return False
def run(self): try: monotime = 0.0 self.SBrickPeripheral = Peripheral() self.SBrickPeripheral.connect(self.sBrickAddr) service = self.SBrickPeripheral.getServiceByUUID('4dc591b0-857c-41de-b5f1-15abda665b0c') characteristics = service.getCharacteristics('02b8cbcc-0e25-4bda-8790-a15f53e6010f') for characteristic in characteristics: if characteristic.uuid == '02b8cbcc-0e25-4bda-8790-a15f53e6010f': self.characteristicRemote = characteristic if self.characteristicRemote is None: return self.emit('sbrick_connected') self.need_authentication = self.get_need_authentication() self.authenticated = not self.need_authentication if self.need_authentication: if self.password_owner is not None: self.authenticate_owner(self.password_owner) while not self.stopFlag: if self.authenticated: if monotonic.monotonic() - monotime >= 0.05: self.send_command() monotime = monotonic.monotonic() self.eventSend.wait(0.01) for channel in self.brickChannels: if channel.decrement_run_timer(): monotime = 0.0 self.drivingLock.release() # print("stop run normal") self.emit("sbrick_channel_stop", channel.channel) if channel.decrement_brake_timer(): self.drivingLock.release() # print("stop brake timer") monotime = 0.0 self.emit("sbrick_channel_stop", channel.channel) if self.authenticated: self.stop_all() self.send_command() self.SBrickPeripheral.disconnect() self.emit('sbrick_disconnected_ok') except BTLEException as ex: self.emit("sbrick_disconnected_error", ex.message)
def mag_raw_callback(self, sbp_msg, **metadata): self.mag[:-1, :] = self.mag[1:, :] self.mag[-1] = (sbp_msg.mag_x, sbp_msg.mag_y, sbp_msg.mag_z) if monotonic() - self.last_plot_update_time > GUI_UPDATE_PERIOD: self.mag_set_data()
def _solution_draw(self):
    # hold the lock with a context manager so it is released even if
    # drawing raises
    with self.list_lock:
        current_time = monotonic()
        self.last_plot_update_time = current_time
        pending_draw_modes = self.pending_draw_modes
        current_mode = pending_draw_modes[-1] if len(pending_draw_modes) > 0 else None
        # Periodically, we make sure to redraw older data to expire old plot data
        if current_time - self.last_stale_update_time > STALE_DATA_PERIOD:
            # we don't update old solution modes every timestep to try and save CPU
            pending_draw_modes = list(mode_string_dict.values())
            self.last_stale_update_time = current_time
        for mode_string in pending_draw_modes:
            if self.running:
                update_current = mode_string == current_mode if current_mode else True
                self._synchronize_plot_data_by_mode(mode_string, update_current)
                if mode_string in self.pending_draw_modes:
                    self.pending_draw_modes.remove(mode_string)
    # make the zoomall win over the position centered button
    if not self.zoomall and self.position_centered and self.running:
        d = (self.plot.index_range.high - self.plot.index_range.low) / 2.
        self.plot.index_range.set_bounds(self.last_soln.e - d, self.last_soln.e + d)
        d = (self.plot.value_range.high - self.plot.value_range.low) / 2.
        self.plot.value_range.set_bounds(self.last_soln.n - d, self.last_soln.n + d)
    if self.zoomall:
        plot_square_axes(self.plot,
                         ('e_fixed', 'e_float', 'e_dgnss'),
                         ('n_fixed', 'n_float', 'n_dgnss'))
def wrapper(*args, **kwargs): start_time = monotonic() res = func(*args, **kwargs) elapsed_time = monotonic() - start_time current_app.logger.info( "{namespace} call {func} took {time}".format( namespace=namespace, func=func.__name__, time="{0:.4f}".format(elapsed_time) ) ) statsd_client.incr('{namespace}.{func}'.format( namespace=namespace, func=func.__name__) ) statsd_client.timing('{namespace}.{func}'.format( namespace=namespace, func=func.__name__), elapsed_time ) return res
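# Assuming the wrapper above is returned by a decorator factory along the
# lines of statsd(namespace=...), usage would look like this (sketch only):
@statsd(namespace="api")
def fetch_user(user_id):
    ...
# each call then logs "api call fetch_user took 0.0042" and emits the
# "api.fetch_user" counter and timing metrics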
def reset(self): self.current = 0 self.sigma = 0 self.sigma_squared = 0 self.lwm = 0 self.hwm = 0 self.start_time = monotonic()
def _timer(timeout): """Iterable timeout timer. :param timeout: the number of seconds to wait before timing out. If ``None`` then the timer will never timeout. :raises RCONTimeoutError: once the timeout is reached. :returns: an iterable that will yield items until the timeout is reached. """ time_start = monotonic.monotonic() while (timeout is None or monotonic.monotonic() - time_start < timeout): yield raise RCONTimeoutError
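# Example use of the timer above (not original code): poll for a response
# for up to five seconds, then let RCONTimeoutError propagate to the caller.
# `connection.read_response()` is a hypothetical non-blocking read.
for _ in _timer(5.0):
    response = connection.read_response()
    if response is not None:
        break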
def intend(self, ident): # preconditions assert(self.quota > 0) assert(self.window > 0) # init now = monotonic() if (ident not in self.state): self.state[ident] = (deque(), 0) (queue, failed_intents) = self.state[ident] # flush expired intents while (queue and ((now - queue[0]) > self.window)): queue.popleft() # work out what to do with current intent if (len(queue) < self.quota): queue.append(now) failed_intents = 0 else: failed_intents += 1 # postconditions assert(len(queue) <= self.quota) # save state self.state[ident] = (queue, failed_intents) # generate bean counters quota_left = self.quota - len(queue) - failed_intents window_left = self.window - (now - queue[0]) return (quota_left, window_left)
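# Sketch (not original code) of gating requests with intend() above.
# `RateLimiter` and its constructor arguments are assumed names for the
# surrounding class; quota_left drops below zero once an ident exceeds
# its quota within the window.
limiter = RateLimiter(quota=5, window=60.0)   # 5 intents per 60 s per ident
quota_left, window_left = limiter.intend("client-42")
if quota_left < 0:
    print("rejected; window clears in %.1f s" % window_left)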
def _start_dig(self):
    """Worker function for the digital input thread."""
    # Watch the digital input lines in a tight loop. If an input changes
    # state while alerts are enabled, SIGUSR1 will be generated.
    #
    # Starts with interrupts disabled!
    self.din_alerts(enable=False)
    self.nrunning += 1
    laststate = None
    try:
        while self.running:
            # Read FIO6 and FIO7; these were set to be INPUTs in __init__()
            state = self.d.readRegister(6006, 2)
            if laststate is None:
                laststate = state
            elif self._gen_alerts and state != laststate:
                # The state of one of the lines has changed: interrupt the
                # main thread with os.kill(). self.ievent is used to pass
                # info about the event.
                self.ievent = ('din', state, monotonic.monotonic())
                if self.dig_callback:
                    self.dig_callback(self, self.ievent)
                else:
                    os.kill(os.getpid(), signal.SIGUSR1)
                laststate = state
    finally:
        self.nrunning -= 1
def __init__( self, incoming_response, swagger_result, start_time, request_end_time, handled_exception_info, request_config, ): """ :param incoming_response: a subclass of bravado_core.response.IncomingResponse. :param swagger_result: the unmarshalled result that is being returned to the user. :param start_time: monotonic timestamp indicating when the HTTP future was created. Depending on the internal operation of the HTTP client used, this is either before the HTTP request was initiated (default client) or right after the HTTP request was sent (e.g. bravado-asyncio / fido). :param request_end_time: monotonic timestamp indicating when we received the incoming response, excluding unmarshalling, validation or potential fallback result processing. :param handled_exception_info: sys.exc_info() data if an exception was caught and handled as part of a fallback response; note that the third element in the list is a string representation of the traceback, not a traceback object. :param RequestConfig request_config: namedtuple containing the request options that were used for making this request. """ self._incoming_response = incoming_response self.start_time = start_time self.request_end_time = request_end_time self.processing_end_time = monotonic.monotonic() self.handled_exception_info = handled_exception_info self.request_config = request_config # we expose the result to the user through the BravadoResponse object; # we're passing it in to this object in case custom implementations need it self._swagger_result = swagger_result
def wait(self, timeout=1): events = self.epoll.poll(timeout=timeout) # Timer events mono = monotonic.monotonic() while len(self.time_events) > 0 and (self.time_events[0][0] < mono): ts, fd, callback = heapq.heappop(self.time_events) callback() for fd, event in events: sock = self.fd_to_sock[fd] # Errors try: if event & (select.EPOLLERR | select.EPOLLHUP): raise SocketFailed() if event & select.EPOLLIN: sock.on_read() if event & select.EPOLLOUT: sock.on_write() # I'm done with sending for now if len(sock.data_to_send) == 0 and len( sock.priority_queue) == 0: self.epoll.modify(sock.fileno(), RO) except SocketFailed: self.epoll.unregister(fd) del self.fd_to_sock[fd] sock.on_fail() self.noshot(sock) sock.close()
def update_plot(self): self.last_plot_update_time = monotonic() self.plot_data.set_data('acc_x', self.acc_x) self.plot_data.set_data('acc_y', self.acc_y) self.plot_data.set_data('acc_z', self.acc_z) self.plot_data.set_data('gyr_x', self.gyro_x) self.plot_data.set_data('gyr_y', self.gyro_y) self.plot_data.set_data('gyr_z', self.gyro_z)
def __init__(self, connection, heartbeat_interval=0): self.connection = connection self.heartbeat_interval = heartbeat_interval self.last_heartbeat_on = monotonic.monotonic() # last heartbeat from server self.connection.watchdog(self.heartbeat_interval, self.on_timer) self.connection.watch(AnyWatch(self.on_heartbeat))
def _main_thread(self):
    next_check = monotonic()
    while not self._stop:
        now = monotonic()
        if now < next_check:
            sleep(1)
            continue
        next_check = now + self._period_sec

        try:
            cellular_information = CellularInformation.get()
            if cellular_information is not None:
                self._cellular_information = cellular_information
        except Exception as e:
            _logger.error("failed to get cellular information")
            _logger.warning(e)
def periodic_timer(self, period, function, *args, **kwargs): """Create a periodic timer to call function every period seconds. Like the timer method except that the timer is automatically rearmed after the function completes. """ timer = sched.RecurringEvent(period, function, args, kwargs) self._mono.schedule(clock.monotonic() + period, timer) return timer
def run():
    socket = create_rep_socket(port)
    while True:
        try:
            # the request content is ignored; any message is answered
            # with the current monotonic timestamp
            msg = socket.recv()
            now = monotonic()
            socket.send(msgpack.dumps(now))
        except Exception as err:
            logging.error(err)
def on_start_recording(self, task): """Start recording""" task['page_data'] = {'date': time.time()} task['page_result'] = None task['run_start_time'] = monotonic.monotonic() if self.browser_version is not None and 'browserVersion' not in task[ 'page_data']: task['page_data']['browserVersion'] = self.browser_version task['page_data']['browser_version'] = self.browser_version if not self.options.throttle and 'throttle_cpu' in self.job: task['page_data']['throttle_cpu_requested'] = self.job[ 'throttle_cpu_requested'] if self.job['throttle_cpu'] > 1: task['page_data']['throttle_cpu'] = self.job['throttle_cpu'] if self.devtools is not None: self.devtools.start_recording()
def to_dict(self): """Create a dictionary with the information in this message. Returns: dict: The dictionary with information """ msg_dict = {} msg_dict['level'] = self.level msg_dict['message'] = self.message msg_dict['now_time'] = monotonic() msg_dict['created_time'] = self.created msg_dict['id'] = self.id msg_dict['count'] = self.count return msg_dict
def run(self):
    while self.running:
        try:
            if randint(1, 10000) < 10000:
                graph.read("RETURN 1")
            else:
                # 1 in every 10000 queries will fail
                graph.read("XXXXXXXX")
        except Neo4jError as failure:
            stats["read_failures"].append((monotonic(), failure))
        else:
            stats["read_successes"] += 1
        finally:
            sleep(uniform(0.0, 0.1))
def __init__( self, future, # type: FutureAdapter response_adapter, # type: typing.Callable[[typing.Any], IncomingResponse] operation=None, # type: typing.Optional[Operation] request_config=None, # type: typing.Optional[RequestConfig] ): # type: (...) -> None self._start_time = monotonic.monotonic() self.future = future self.response_adapter = response_adapter self.operation = operation self.request_config = request_config or RequestConfig( {}, also_return_response_default=False, )
def emit(self, event, data = None): try: if self.lastContactTime > 0: self.sio.emit(event, data) self.lastContactTime = monotonic() self.numContacts += 1 elif self.numDisconnects > 0: # only warn if previously connected logger.warning("Unable to emit to disconnected secondary {0} at {1}, event='{2}'".\ format(self.id+1, self.address, event)) except Exception: logger.exception("Error emitting to secondary {0} at {1}, event='{2}'".\ format(self.id+1, self.address, event)) if self.sio.connected: logger.warning("Disconnecting after error emitting to secondary {0} at {1}".\ format(self.id+1, self.address)) self.sio.disconnect()
def step_complete(self, task): """All of the processing for the current test step is complete""" # Write out the accumulated page_data if task['log_data'] and task['page_data']: if 'browser' in self.job: task['page_data']['browser_name'] = self.job['browser'] if 'step_name' in task: task['page_data']['eventName'] = task['step_name'] if 'run_start_time' in task: task['page_data']['test_run_time_ms'] = \ int(round((monotonic.monotonic() - task['run_start_time']) * 1000.0)) path = os.path.join(task['dir'], task['prefix'] + '_page_data.json.gz') json_page_data = json.dumps(task['page_data']) logging.debug('Page Data: %s', json_page_data) with gzip.open(path, 'wb', 7) as outfile: outfile.write(json_page_data)
def on_timer_task(self, event):
    """The timer task has three responsibilities:

    1. If we cannot connect to the brokers within self.timeout seconds,
       raise an exception.
    2. Check whether any request has timed out; if so, consult the retry
       history:
       - if the request has already been retried self.retry times, stop the loop.
       - if not, remove it from the awaiting_response dict, send it back to
         the to_send queue, and bump its retry count.
    3. Check whether the number of requests in processing has reached the
       throttle; if not, send the corresponding number of messages.
    """
    if not self.connected:
        LOG.error("Couldn't connect to brokers after %s seconds", self.timeout)
        event.container.stop()
        raise MessageHandlerTimeoutException()
    for request_id, started in sorted(list(
            self.awaiting_response.items())):
        if monotonic.monotonic() - started > self.timeout:
            if request_id not in self.retry_count or self.retry_count[
                    request_id] < self.retry:
                self.retry_count.setdefault(request_id, 0)
                self.retry_count[request_id] += 1
                LOG.warn(
                    "Didn't receive response in %s for request %s, will retry [%s/%s]",
                    self.timeout,
                    request_id,
                    self.retry_count[request_id],
                    self.retry,
                )
                # append to resend queue and remove from awaiting_response queue
                self.to_send.append(self.id_msg_map[request_id])
                self.awaiting_response.pop(request_id)
            else:
                LOG.warn("Stopping message event loop due to timeout %s",
                         request_id)
                event.container.stop()
                raise MessageHandlerTimeoutException()

    # send more requests if number of waiting < throttle
    spots = self.throttle - len(self.awaiting_response)
    if self.to_send and spots > 0:
        # if there are free spots, send messages from the queue
        self._send_message(min(len(self.to_send), spots))

    # schedule the next timer task
    self.timer_task = event.container.schedule(self.TIMER_TASK_DELAY, self)
def _wait_for_job(self, job_path): """Poll WMI job state and wait for completion.""" job_wmi_path = job_path.replace('\\', '/') job = self._get_wmi_obj(job_wmi_path) # We'll log the job status from time to time. last_report_time = 0 report_interval = 5 while not self._is_job_completed(job): now = monotonic.monotonic() if now - last_report_time > report_interval: job_details = self._get_job_details(job) LOG.debug("Waiting for WMI job: %s.", job_details) last_report_time = now time.sleep(0.1) job = self._get_wmi_obj(job_wmi_path) job_state = job.JobState err_code = job.ErrorCode # We'll raise an exception for killed jobs. job_failed = job_state not in self._successful_job_states or err_code job_warnings = job_state == constants.JOB_STATE_COMPLETED_WITH_WARNINGS job_details = self._get_job_details( job, extended=(job_failed or job_warnings)) if job_failed: err_sum_desc = getattr(job, 'ErrorSummaryDescription', None) err_desc = job.ErrorDescription LOG.error("WMI job failed: %s.", job_details) raise exceptions.WMIJobFailed(job_state=job_state, error_code=err_code, error_summ_desc=err_sum_desc, error_desc=err_desc) if job_warnings: LOG.warning("WMI job completed with warnings. For detailed " "information, please check the Windows event logs. " "Job details: %s.", job_details) else: LOG.debug("WMI job succeeded: %s.", job_details) return job
def run(self, *args): record = [] now = datetime.now() now_mono = monotonic() with open(FLIGHT_RECORD_FILE, "a") as record_f: record_f.write( "================%s[%0.8f] %s.%s.%s================\n" % (now.strftime('%Y-%m-%d %H:%M:%S'), now_mono, self.__class__.__module__, self.__class__.__name__, self._testMethodName)) with JournalRecorder("journal", record): with CmdFlightRecorder("udisksctl monitor", ["udisksctl", "monitor"], record): with CmdFlightRecorder("udevadm monitor", ["udevadm", "monitor"], record): super(UdisksTestCase, self).run(*args) record_f.write("".join(record))
def send(self): """ Send the contents of the output buffer to the network. """ if self.__closed: raise WireError("Closed") sent = 0 while self.__output: try: n = self.__socket.send(self.__output) except (IOError, OSError): self.__set_broken("Wire broken") else: self.__active_time = monotonic() self.__bytes_sent += n self.__output[:n] = [] sent += n return sent
def trigger(self, event, evtArgs=None): # logger.debug('-Triggered event- {0}'.format(event)) evt_list = [] if event in self.eventOrder: for name in self.eventOrder[event]: evt_list.append([event, name]) if Evt.ALL in self.eventOrder: for name in self.eventOrder[Evt.ALL]: evt_list.append([Evt.ALL, name]) if len(evt_list): for ev, name in evt_list: handler = self.events[ev][name] args = copy.copy(handler['defaultArgs']) if evtArgs: if args: args.update(evtArgs) else: args = evtArgs if ev == Evt.ALL: args['_eventName'] = event if handler['unique']: threadName = name + str(monotonic()) else: threadName = name # stop any threads with same name for token in self.eventThreads.copy(): if token in self.eventThreads and self.eventThreads[token][ 'name'] == name: self.eventThreads[token]['thread'].kill(block=False) if token in self.eventThreads and self.eventThreads[token][ 'thread'].dead: self.eventThreads.pop(token, False) if handler['priority'] < 100: handler['handlerFn'](args) else: greenlet = gevent.spawn(handler['handlerFn'], args) self.eventThreads[greenlet.minimal_ident] = { 'name': threadName, 'thread': greenlet }
def submit_timeout(self, timeout, fn, *args, **kwargs): """Like :code:`submit(fn, *args, **kwargs)`, but uses the specified timeout rather than this executor's default. .. versionadded:: 1.19.0 """ with self._shutdown.ensure_alive(): delegate_future = self._delegate.submit(fn, *args, **kwargs) future = MapFuture(delegate_future) track_future(future, type="timeout", executor=self._name) future.add_done_callback(self._on_future_done) job = Job(future, delegate_future, monotonic() + timeout) with self._jobs_lock: self._jobs.append(job) self._jobs_write.set() return future
def submit_retry(self, retry_policy, fn, *args, **kwargs): """Submit a callable with a specific retry policy. Parameters: retry_policy (RetryPolicy): a policy which is used for this call only """ future = RetryFuture(self) job = RetryJob(retry_policy, None, future, 0, monotonic(), fn, args, kwargs) self._append_job(job) # Let the submit thread know it should wake up to check for new jobs self._wake_thread() self._log.debug("Returning future %s", future) return future
def on_disconnect(self):
    try:
        if self.lastContactTime > 0:
            self.startConnectTime = monotonic()
            self.lastContactTime = -1
            self.numDisconnects += 1
            self.numDisconnsDuringRace += 1
            upSecs = int(round(self.startConnectTime - self.firstContactTime)) if self.firstContactTime > 0 else 0
            logger.warning("Disconnected from " + self.get_log_str(upSecs))
            self.totalUpTimeSecs += upSecs
            if self.emit_cluster_connect_change:
                self.emit_cluster_connect_change(False)
        else:
            logger.debug("Received extra 'on_disconnect' event for secondary {0} at {1}".format(self.id+1, self.address))
    except Exception:
        logger.exception("Error handling Cluster 'on_disconnect' for secondary {0} at {1}".\
            format(self.id+1, self.address))
def __exit__(self, exc_type, exc_val, exc_tb): self.__depth -= 1 if self.__depth: return # Didn't exit all the contexts yet duration = monotonic.monotonic() - self.__start_time if duration > LONG_TRANSACTION_TRESHOLD: logger.warn('The transaction took a long time (%s seconds): %s', duration, traceback.format_stack()) self.__start_time = None if exc_type: logger.error('Rollback of transaction %s:%s/%s/%s', self, exc_type, exc_val, traceback.format_tb(exc_tb)) self.__connection.rollback() else: logger.debug('Commit of transaction %s', self) self.__connection.commit() self._cursor = None
def on_start_recording(self, task): """Notification that we are about to start an operation that needs to be recorded""" # Mark the start point in the various log files self.log_pos = {} if self.moz_log is not None: files = sorted(glob.glob(self.moz_log + '*')) for path in files: self.log_pos[path] = os.path.getsize(path) self.recording = True now = monotonic.monotonic() if not self.task['stop_at_onload']: self.last_activity = now if self.page_loaded is not None: self.page_loaded = now DesktopBrowser.on_start_recording(self, task) logging.debug('Starting measurement') task['start_time'] = datetime.utcnow()
def influx_reconnect(self, force=False): now = monotonic.monotonic() if not (force or self.influx_last_reconnect is None or self.influx_last_reconnect + 10 < now): # don't attempt to reconnect more than once per 10s return self.influx_last_reconnect = now # stop the old timer, if we need to if self.influx_timer: self.influx_timer.cancel() self.influx_timer = None # build up some kwargs to pass to InfluxDBClient kwargs = {} def add_arg_if_exists(kwargsname, path, getter=self._settings.get): v = getter(path) if v: kwargs[kwargsname] = v add_arg_if_exists('host', ['host']) add_arg_if_exists('port', ['port'], self._settings.get_int) if self._settings.get_boolean(['authenticate']): add_arg_if_exists('username', ['username']) add_arg_if_exists('password', ['password']) add_arg_if_exists('database', ['database']) kwargs['ssl'] = self._settings.get_boolean(['ssl']) if kwargs['ssl']: kwargs['verify_ssl'] = self._settings.get_boolean(['verify_ssl']) kwargs['use_udp'] = self._settings.get_boolean(['udp']) if kwargs['use_udp'] and 'port' in kwargs: kwargs['udp_port'] = kwargs['port'] del kwargs['port'] if self.influx_db is None or kwargs != self.influx_kwargs: self.influx_db = self.influx_try_connect(kwargs) if self.influx_db: self.influx_kwargs = kwargs self.influx_prefix = self._settings.get(['prefix']) or '' # start a new timer if self.influx_db: interval = self._settings.get_float(['interval'], min=0) if not interval: interval = self.get_settings_defaults()['interval'] self.influx_timer = octoprint.util.RepeatedTimer(interval, self.influx_gather) self.influx_timer.start()
def _submit_loop(executor_ref): # Runs in a separate thread continuously submitting to the delegate # executor until no jobs are ready, or waiting until next job is ready while True: executor = executor_ref() if not executor: break if executor._shutdown: break executor._log.debug("_submit_loop iter") with executor._lock: job = executor._get_next_job() if not job: executor._log.debug("No jobs at all. Waiting...") event = executor._submit_event del executor _submit_wait(event) continue if job.stop_retry: executor._log.debug("Discarding job due to cancel: %s", job) executor._pop_job(job) copy_future(job.old_delegate, job.future) continue now = monotonic() if job.when <= now: # Can submit immediately and check for next job executor._submit_now(job) continue # There is nothing to submit immediately. # Sleep until either: # - reaching the time of the nearest job, or... # - woken up by condvar delta = job.when - now executor._log.debug("No ready job. Waiting: %s", delta) event = executor._submit_event del executor del job _submit_wait(event, delta)
def _notify_progress_sync(self, uuid, client, done_count, total_count): """Notify progress reporting on the status of a script download. This function must be called synchronously inside of the event loop. Args: uuid (int): The id of the device that we are talking to client (string): The client identifier done_count (int): The number of items that have been finished total_count (int): The total number of items """ # If the connection was closed, don't notify anything conn_data = self._connections.get(uuid, None) if conn_data is None: return last_progress = conn_data['last_progress'] should_drop = False # We drop status updates that come faster than our configured update interval # unless those updates are the final update, which we send on. The first # update is always also sent since there would not have been an update before # that. now = monotonic() if last_progress is not None and ( now - last_progress) < self.throttle_progress: should_drop = True if should_drop and (done_count != total_count): return conn_data['last_progress'] = now slug = self._build_device_slug(uuid) status_msg = { 'type': 'notification', 'operation': 'send_script', 'client': client, 'done_count': done_count, 'total_count': total_count } self._publish_response(slug, status_msg)
def __init__(self, options, cache_dir): self.options = options self.device = options.device self.rndis = options.rndis self.ping_address = None self.screenrecord = None self.tcpdump = None self.version = None self.kernel = None self.short_version = None self.last_bytes_rx = 0 self.initialized = False self.this_path = os.path.abspath(os.path.dirname(__file__)) self.root_path = os.path.abspath(os.path.join(self.this_path, os.pardir)) self.cache_dir = cache_dir self.simplert_path = None self.simplert = None self.no_network_count = 0 self.last_network_ok = monotonic.monotonic() self.needs_exit = False self.rebooted = False self.vpn_forwarder = None self.known_apps = { 'com.motorola.ccc.ota': {}, 'com.google.android.apps.docs': {}, 'com.samsung.android.MtpApplication': {} } self.gnirehtet = None self.gnirehtet_exe = None if options.gnirehtet: if platform.system() == "Windows": if platform.machine().endswith('64'): self.gnirehtet_exe = os.path.join(self.root_path, 'gnirehtet', 'win64', 'gnirehtet.exe') elif platform.system() == "Linux": if os.uname()[4].startswith('arm'): self.gnirehtet_exe = os.path.join(self.root_path, 'gnirehtet', 'arm', 'gnirehtet') elif platform.architecture()[0] == '64bit': self.gnirehtet_exe = os.path.join(self.root_path, 'gnirehtet', 'linux64', 'gnirehtet') if self.gnirehtet_exe is not None: from .os_util import kill_all kill_all(os.path.basename(self.gnirehtet_exe), True) self.exe = 'adb'
def recv_cb(self, pkt, src_list=None):
    '''Default channel receive callback'''
    log_test.debug('Received packet from source %s, destination %s' %
                   (pkt[IP].src, pkt[IP].dst))
    if src_list is None:
        send_time = float(pkt[IP].payload.load)
        recv_time = monotonic.monotonic()
        log_test.debug('Packet received in %.6f seconds' % (recv_time - send_time))
    elif pkt[IP].src == src_list[0]:
        log_test.debug(
            'Received packet from specified source %s, destination %s' %
            (pkt[IP].src, pkt[IP].dst))
    else:
        log_test.debug(
            'Received packet not from specified source %s, destination %s' %
            (pkt[IP].src, pkt[IP].dst))
    time.sleep(60)
def update_alarm_state(self): now = monotonic() with self.mutex: _log.debug( "Deciding whether to change alarm state - alarmed is {}, now is {}, next check time is {}, succeeded count is {}, failed count is {}" .format(self.alarmed, now, self._next_check, self.succeeded, self.failed)) if (now > self._next_check): _log.debug("Checking alarm state") if not self.alarmed: if self.succeeded == 0 and self.failed > 0: self.set_alarm() self._next_check = now + 30 else: if self.succeeded > 0: self.clear_alarm() self._next_check = now + 15 self.succeeded = self.failed = 0
def handle_repeat_stt(self, message): # replaces https://github.com/MatthewScholefield/skill-repeat-recent sources = message.context.get("destination", ["broadcast"]) if isinstance(sources, str): sources = [sources] utts = [] for source in sources: utts += self.heard_utts.get(source, []) ts = max([self.last_stt_time.get(source, 0) for source in sources]) if len(utts) < 2: last_stt = self.translate('nothing') else: last_stt = utts[-2] # last is current utt if monotonic() - ts > 120: self.speak_dialog('repeat.stt.old', {"stt": last_stt}) else: self.speak_dialog('repeat.stt', {"stt": last_stt}) self.update_picture(last_stt)
def prepare_task(self, task): """Format the file prefixes for multi-step testing""" task['page_data'] = {'date': time.time()} task['run_start_time'] = monotonic.monotonic() if task['current_step'] == 1: task['prefix'] = task['task_prefix'] task['video_subdirectory'] = task['task_video_prefix'] else: task['prefix'] = '{0}_{1:d}'.format(task['task_prefix'], task['current_step']) task['video_subdirectory'] = '{0}_{1:d}'.format( task['task_video_prefix'], task['current_step']) if task['video_subdirectory'] not in task['video_directories']: task['video_directories'].append(task['video_subdirectory']) if self.event_name is not None: task['step_name'] = self.event_name else: task['step_name'] = 'Step_{0:d}'.format(task['current_step'])
def __init__(self, path, options, job): DesktopBrowser.__init__(self, path, options, job) self.job = job self.task = None self.options = options self.path = path self.event_name = None self.etw = None self.etw_log = None self.driver = None self.nav_error = None self.page_loaded = None self.recording = False self.browser_version = None self.need_orange = True self.last_activity = monotonic.monotonic() self.script_dir = os.path.join( os.path.abspath(os.path.dirname(__file__)), 'js')
def stream(frame, pipe):
    """
    stream an image returned by the generator to the rtmp server.

    args:
        frame: an image frame
        pipe (subprocess.Popen): a pipe created with setup_stream_pipe()
    """
    logger.debug("streaming new image")
    serialised = serialize_array(frame)
    for i in range(FPS):
        pipe.stdin.write(serialised)
        duty_cycle = 1  # seconds
        # Popen.poll() returns None while the process is alive and the exit
        # code once it has died, so compare against None rather than truthiness
        if pipe.poll() is not None:
            print("looks like the video encoder died!")
            return
        time.sleep(duty_cycle - monotonic.monotonic() % duty_cycle)
def periodic_callback(self): """Periodically help maintain adapter internal state.""" now = monotonic.monotonic() if len(self.probe_callbacks) > 0: # Currently probing: check if not timed out if (now - self.last_probe) > self.get_config('default_timeout'): self._consume_probe_callbacks( False, 'Timeout while waiting for scan response') elif self.autoprobe_interval is not None: # Probe every `autoprobe_interval` seconds to keep up to date scan results if self.client.connected and ( now - self.last_probe) > self.autoprobe_interval: self.logger.info('Refreshing probe results...') self.probe_async( lambda adapter_id, success, failure_reason: None)
def test_no_race(self): node_index = 1 node = server.INTERFACE.nodes[node_index] self.assertIsNone(node.pass_count) gevent.sleep(1) # simulate a lap server.INTERFACE.simulate_lap(node_index) self.assertIsNone(node.pass_count) gevent.sleep(1) # hardware lap now = monotonic() server.INTERFACE.process_lap_stats(node, 1, now, 89, None, None, None) self.assertEqual(node.pass_count, 1) self.assertEqual(node.pass_peak_rssi, 89)
def _report_queries(self, descr=None): if not self.enable_report_queries: return if not hasattr(self, 'start_time'): return end_time = monotonic.monotonic() if descr is None: descr = "" nb_queries = 0 duration = timedelta() for conn in connections.all(): nb_queries += len(conn.queries) for query in conn.queries: convert = datetime.strptime(query['time'], "%S.%f") duration += timedelta(0, convert.second, convert.microsecond) # days, seconds, microseconds LOGGER.debug("(elapsed: %.2fs) %s: %s for %d SQL queries", (end_time - self.start_time), descr, duration, nb_queries)
def iter_input_batched(self, batch_size=BATCH_SIZE): """Get an iterable over this phase's input queue, yielding items in batches of the specified size. Stops iteration if the queue receives FINISHED (and does not yield that value, but yields the batch leading up to it). Raises if the queue receives ERROR. It is a bug to call this method on a phase with no input queue. """ while True: this_batch = [] start_time = monotonic() timeout = self.__batch_timeout def batch_ready(): return ( (len(this_batch) >= batch_size) or (monotonic() - start_time > timeout) or (this_batch[-1] in (Phase.FINISHED, Phase.ERROR)) ) this_batch.append(self.__get_input()) while not batch_ready(): try: this_batch.append(self.__get_input(timeout=timeout)) except Empty: # batch_ready() will now be true pass stop = False if this_batch[-1] is Phase.FINISHED: stop = True this_batch.pop(-1) if this_batch: yield this_batch if stop: # all done return
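# Not part of the original class: a sketch of a phase worker consuming its
# input queue in batches. `process_batch` is a hypothetical handler; iteration
# ends cleanly once the queue delivers Phase.FINISHED.
def run_phase(phase):
    for batch in phase.iter_input_batched():
        process_batch(batch)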