def _fill_metrics(self, stats, connection):
    """Append the basic per-client metrics to *stats*.

    Records the collection time, client type/version, the timestamp of the
    cluster connection, and the client's local address and name.
    """
    self._add_stat(stats, "lastStatisticsCollectionTime", current_time_in_millis())
    self._add_stat(stats, "enterprise", "false")
    self._add_stat(stats, "clientType", CLIENT_TYPE)
    self._add_stat(stats, "clientVersion", CLIENT_VERSION)
    self._add_stat(stats, "clusterConnectionTimestamp", to_millis(connection.start_time))
    address = connection.local_address
    # %s applies str() to both parts, matching "host:port" formatting.
    self._add_stat(stats, "clientAddress", "%s:%s" % (address.host, address.port))
    self._add_stat(stats, "clientName", self._client.name)
def time_out_or_sleep_before_next_try(self, start_millis, last_failed_member, last_exception):
    """Fail or back off between listener-registration retries.

    Raises OperationTimeoutError if the total elapsed time since
    *start_millis* exceeds the client invocation timeout; otherwise sleeps
    for the configured retry pause before the caller retries.

    :param start_millis: timestamp (ms) when the retry loop started.
    :param last_failed_member: member whose registration last failed.
    :param last_exception: the exception raised by that failure.
    :raises OperationTimeoutError: when the invocation timeout elapsed.
    """
    now_in_millis = current_time_in_millis()
    elapsed_millis = now_in_millis - start_millis
    # invocation_timeout is in seconds; compare in milliseconds.
    invocation_time_out_millis = self._invocation_service.invocation_timeout * 1000
    if elapsed_millis > invocation_time_out_millis:
        # BUG FIX: the format string and its arguments were previously passed
        # as separate constructor arguments, so the message was never
        # %-interpolated. Format the message eagerly before raising.
        raise OperationTimeoutError(
            "Registering listeners is timed out. Last failed member: %s, Current time: %s, Start time: %s, "
            "Client invocation timeout: %s, Elapsed time: %s ms, Cause: %s"
            % (last_failed_member, now_in_millis, start_millis,
               invocation_time_out_millis, elapsed_millis, last_exception.args[0]))
    else:
        # Not timed out yet: back off before the next attempt.
        sleep(self._invocation_service.invocation_retry_pause)
def _add_client_attributes(self, attributes, connection):
    """Append the basic per-client attributes to *attributes*.

    Records the collection time, client type/version, the timestamp of the
    cluster connection, and the client's local address and name.
    """
    self._add_attribute(attributes, "lastStatisticsCollectionTime", current_time_in_millis())
    self._add_attribute(attributes, "enterprise", "false")
    self._add_attribute(attributes, "clientType", CLIENT_TYPE)
    self._add_attribute(attributes, "clientVersion", __version__)
    self._add_attribute(attributes, "clusterConnectionTimestamp", to_millis(connection.start_time))
    address = connection.local_address
    # %s applies str() to both parts, matching "host:port" formatting.
    self._add_attribute(attributes, "clientAddress", "%s:%s" % (address.host, address.port))
    self._add_attribute(attributes, "clientName", self._client.name)
def _fill_metrics(self, stats, owner_connection):
    """Append the basic per-client metrics to *stats*.

    Uses the owner connection's socket to determine the client's local
    address and its start time for the connection timestamp.
    """
    self._add_stat(stats, "lastStatisticsCollectionTime", current_time_in_millis())
    self._add_stat(stats, "enterprise", "false")
    self._add_stat(stats, "clientType", CLIENT_TYPE)
    self._add_stat(stats, "clientVersion", CLIENT_VERSION)
    self._add_stat(stats, "clusterConnectionTimestamp", to_millis(owner_connection.start_time_in_seconds))
    # getsockname() returns a (host, port) pair for the local endpoint.
    host, port = owner_connection.socket.getsockname()
    self._add_stat(stats, "clientAddress", "%s:%s" % (host, port))
    self._add_stat(stats, "clientName", self._client.name)
def _send_statistics(self):
    """Collect all client statistics and ship them to the cluster owner.

    Picks a random live connection; if none is available, logs at debug
    level and skips this collection cycle.
    """
    connection = self._connection_manager.get_random_connection()
    if not connection:
        self.logger.debug(
            "Cannot send client statistics to the server. No connection found.",
            extra=self._logger_extras,
        )
        return

    collection_timestamp = current_time_in_millis()
    stats = []
    # Order matters only for the textual layout of the payload.
    self._fill_metrics(stats, connection)
    self._add_near_cache_stats(stats)
    self._add_runtime_and_os_stats(stats)
    self._send_stats_to_owner(collection_timestamp, "".join(stats), connection)
def try_sync_connect_to_all_members(self):
    """Synchronously connect to every known cluster member.

    Keeps retrying until a full pass over the member list succeeds,
    the invocation times out (raised by the helper), or the client
    is no longer live.
    """
    cluster_service = self._client.cluster
    start_millis = current_time_in_millis()
    while True:
        failed_member = None
        failure = None
        # Attempt every member each pass; remember only the last failure.
        for member in cluster_service.members:
            try:
                self._client.connection_manager.get_or_connect(member.address).result()
            except Exception as e:
                failed_member = member
                failure = e
        if failure is None:
            # A clean pass: all members connected.
            break
        self.time_out_or_sleep_before_next_try(start_millis, failed_member, failure)
        if not self._client.lifecycle.is_live():
            break
def _collect_and_send_stats(self):
    """Gather client attributes and compressed metrics, then send them.

    Picks a random live connection; if none is available, logs at debug
    level and skips this collection cycle.
    """
    connection = self._connection_manager.get_random_connection()
    if not connection:
        _logger.debug(
            "Cannot send client statistics to the server. No connection found."
        )
        return

    collection_timestamp = current_time_in_millis()
    attributes = []
    compressor = MetricsCompressor()
    # Textual attributes and binary metrics are accumulated side by side.
    self._add_client_attributes(attributes, connection)
    self._add_near_cache_metrics(attributes, compressor)
    self._add_system_and_process_metrics(attributes, compressor)
    self._add_tcp_metrics(compressor)
    self._send_stats(
        collection_timestamp,
        "".join(attributes),
        compressor.generate_blob(),
        connection,
    )
def next_id(self):
    """Return the next id from this batch.

    Returns None when the batch's validity window has elapsed or when
    the batch iterator is exhausted.
    """
    if current_time_in_millis() >= self._invalid_since:
        # Batch expired: the caller must fetch a new one.
        return None
    return next(self._iterator, None)
def __init__(self, id_batch, validity_in_millis):
    """Wrap *id_batch* with an expiry deadline.

    :param id_batch: iterable of pre-fetched ids.
    :param validity_in_millis: how long (ms) the batch stays valid;
        a non-positive value means the batch never expires.
    """
    self._id_batch = id_batch
    self._iterator = iter(id_batch)
    if validity_in_millis > 0:
        self._invalid_since = validity_in_millis + current_time_in_millis()
    else:
        # Non-positive validity: effectively never expires.
        self._invalid_since = MAX_SIZE