def _check_server(self):
    """Call hello or read the next streaming response.

    Returns a ServerDescription.
    """
    start = _time()
    try:
        try:
            return self._check_once()
        except (OperationFailure, NotPrimaryError) as exc:
            # Update max cluster time even when hello fails.
            self._topology.receive_cluster_time(
                exc.details.get('$clusterTime'))
            raise
    except ReferenceError:
        # NOTE(review): presumably raised when a weakly-referenced owner
        # is gone; propagate unchanged instead of reporting a server error.
        raise
    except Exception as error:
        _sanitize(error)
        sd = self._server_description
        address = sd.address
        duration = _time() - start
        if self._publish:
            # Heartbeat was "awaited" only if we already knew the server
            # type and had a topologyVersion (i.e. streaming mode).
            awaited = sd.is_server_type_known and sd.topology_version
            self._listeners.publish_server_heartbeat_failed(
                address, duration, error, awaited)
        self._reset_connection()
        if isinstance(error, _OperationCancelled):
            # A cancelled check is handled by the caller, not by
            # returning an Unknown description.
            raise
        self._rtt_monitor.reset()
        # Server type defaults to Unknown.
        return ServerDescription(address, error=error)
def _select_servers_loop(self, selector, timeout, address):
    """select_servers() guts. Hold the lock when calling this."""
    now = _time()
    end_time = now + timeout
    server_descriptions = self._description.apply_selector(
        selector, address)
    while not server_descriptions:
        # No suitable servers.
        if timeout == 0 or now > end_time:
            raise ServerSelectionTimeoutError(
                self._error_message(selector))
        self._ensure_opened()
        self._request_check_all()
        # Release the lock and wait for the topology description to
        # change, or for a timeout. We won't miss any changes that
        # came after our most recent apply_selector call, since we've
        # held the lock until now.
        self._condition.wait(common.MIN_HEARTBEAT_INTERVAL)
        self._description.check_compatible()
        now = _time()
        server_descriptions = self._description.apply_selector(
            selector, address)

    # Re-check compatibility once servers have been found, since the
    # description may have changed while we waited.
    self._description.check_compatible()
    return server_descriptions
def _ping(self):
    """Run a "hello" command and return the RTT."""
    with self._pool.get_socket({}) as sock_info:
        # Bail out if the monitor was shut down while we waited for
        # a socket.
        if self._executor._stopped:
            raise Exception('_RttMonitor closed')
        begun = _time()
        sock_info.hello()
        return _time() - begun
def select_servers(self, server_selection_timeout=None, address=None):
    """Return a list of Servers matching selector, or time out.

    :Parameters:
      - `selector`: function that takes a list of Servers and returns
        a subset of them.
      - `server_selection_timeout` (optional): maximum seconds to wait.
        If not provided, the default value
        common.SERVER_SELECTION_TIMEOUT is used.
      - `address`: optional server address to select.

    Calls self.open() if needed.

    Raises exc:`ServerSelectionTimeoutError` after
    `server_selection_timeout` if no matching servers are found.
    """
    if server_selection_timeout is None:
        server_timeout = self._settings.server_selection_timeout
    else:
        server_timeout = server_selection_timeout

    with self._lock:
        self._description.check_compatible()
        now = _time()
        end_time = now + server_timeout
        server_descriptions = self._description.apply_selector(
            selector, address)

        while not server_descriptions:
            # No suitable servers.
            if server_timeout == 0 or now > end_time:
                raise ServerSelectionTimeoutError(
                    self._error_message(selector))

            self._ensure_opened()
            self._request_check_all()

            # Release the lock and wait for the topology description to
            # change, or for a timeout. We won't miss any changes that
            # came after our most recent apply_selector call, since we've
            # held the lock until now.
            self._condition.wait(common.MIN_HEARTBEAT_INTERVAL)
            self._description.check_compatible()
            now = _time()
            server_descriptions = self._description.apply_selector(
                selector, address)

        # Map descriptions back to live Server objects while still
        # holding the lock.
        return [
            self.get_server_by_address(sd.address)
            for sd in server_descriptions
        ]
def select_servers(self, selector, server_selection_timeout=None,
                   address=None):
    """Return a list of Servers matching selector, or time out.

    :Parameters:
      - `selector`: function that takes a list of Servers and returns
        a subset of them.
      - `server_selection_timeout` (optional): maximum seconds to wait.
        If not provided, the default value
        common.SERVER_SELECTION_TIMEOUT is used.
      - `address`: optional server address to select.

    Calls self.open() if needed.

    Raises exc:`ServerSelectionTimeoutError` after
    `server_selection_timeout` if no matching servers are found.
    """
    if server_selection_timeout is None:
        server_timeout = self._settings.server_selection_timeout
    else:
        server_timeout = server_selection_timeout

    with self._lock:
        self._description.check_compatible()
        now = _time()
        end_time = now + server_timeout
        server_descriptions = self._description.apply_selector(
            selector, address)

        while not server_descriptions:
            # No suitable servers.
            if server_timeout == 0 or now > end_time:
                raise ServerSelectionTimeoutError(
                    self._error_message(selector))

            self._ensure_opened()
            self._request_check_all()

            # Release the lock and wait for the topology description to
            # change, or for a timeout. We won't miss any changes that
            # came after our most recent apply_selector call, since we've
            # held the lock until now.
            self._condition.wait(common.MIN_HEARTBEAT_INTERVAL)
            self._description.check_compatible()
            now = _time()
            server_descriptions = self._description.apply_selector(
                selector, address)

        # Map descriptions back to live Server objects while still
        # holding the lock.
        return [self.get_server_by_address(sd.address)
                for sd in server_descriptions]
def _call(self, call, *args, **kwargs): timeout = self.gettimeout() if timeout: start = _time() while True: try: return call(*args, **kwargs) except _RETRY_ERRORS: self.socket_checker.select(self, True, True, timeout) if timeout and _time() - start > timeout: raise _socket.timeout("timed out") continue
def _check_with_retry(self): """Call ismaster once or twice. Reset server's pool on error. Returns a ServerDescription. """ # According to the spec, if an ismaster call fails we reset the # server's pool. If a server was once connected, change its type # to Unknown only after retrying once. address = self._server_description.address retry = True metadata = None if self._server_description.server_type == SERVER_TYPE.Unknown: retry = False metadata = self._pool.opts.metadata start = _time() try: cluster_time = self._topology.max_cluster_time() # If the server type is unknown, send metadata with first check. return self._check_once(metadata=metadata, cluster_time=cluster_time) except ReferenceError: raise except Exception as error: error_time = _time() - start if self._publish: self._listeners.publish_server_heartbeat_failed( address, error_time, error) self._topology.reset_pool(address) default = ServerDescription(address, error=error) if not retry: self._avg_round_trip_time.reset() # Server type defaults to Unknown. return default # Try a second and final time. If it fails return original error. # Always send metadata: this is a new connection. start = _time() try: cluster_time = self._topology.max_cluster_time() return self._check_once(metadata=self._pool.opts.metadata, cluster_time=cluster_time) except ReferenceError: raise except Exception as error: error_time = _time() - start if self._publish: self._listeners.publish_server_heartbeat_failed( address, error_time, error) self._avg_round_trip_time.reset() return default
def _check_with_socket(self, sock_info):
    """Return (IsMaster, round_trip_time).

    Can raise ConnectionFailure or OperationFailure.
    """
    begun = _time()
    # Build a legacy OP_QUERY ismaster request by hand.
    request_id, msg, max_doc_size = message.query(
        0, 'admin.$cmd', 0, -1, {'ismaster': 1}, None,
        DEFAULT_CODEC_OPTIONS)
    # TODO: use sock_info.command()
    sock_info.send_message(msg, max_doc_size)
    reply = sock_info.receive_message(1, request_id)
    unpacked = helpers._unpack_response(reply)
    return IsMaster(unpacked['data'][0]), _time() - begun
def _check_with_socket(self, sock_info):
    """Return (IsMaster, round_trip_time).

    Can raise ConnectionFailure or OperationFailure.
    """
    begun = _time()
    try:
        response = sock_info.ismaster(
            self._pool.opts.metadata,
            self._topology.max_cluster_time())
    except OperationFailure as exc:
        # Update max cluster time even when isMaster fails.
        self._topology.receive_cluster_time(
            exc.details.get('$clusterTime'))
        raise
    return response, _time() - begun
def _check_with_socket(self, sock_info):
    """Return (IsMaster, round_trip_time).

    Can raise ConnectionFailure or OperationFailure.
    """
    clock_start = _time()
    # Hand-build the legacy OP_QUERY ismaster message.
    request_id, msg, max_doc_size = message.query(
        0, 'admin.$cmd', 0, -1, {'ismaster': 1}, None,
        DEFAULT_CODEC_OPTIONS)
    # TODO: use sock_info.command()
    sock_info.send_message(msg, max_doc_size)
    raw = sock_info.receive_message(1, request_id)
    docs = helpers._unpack_response(raw)
    return IsMaster(docs['data'][0]), _time() - clock_start
def __init__(self, sock, pool, address, id):
    """Track per-connection state, identity, and server limits.

    :Parameters:
      - `sock`: the raw network socket.
      - `pool`: the owning connection pool.
      - `address`: the (host, port) of the server.
      - `id`: the pool-assigned connection id.
    """
    self.sock = sock
    self.address = address
    self.id = id
    # Credentials already authenticated on this connection.
    self.authset = set()
    self.closed = False
    self.last_checkin_time = _time()
    self.performed_handshake = False
    # Conservative defaults until the handshake reports real limits.
    self.is_writable = False
    self.max_wire_version = MAX_WIRE_VERSION
    self.max_bson_size = MAX_BSON_SIZE
    self.max_message_size = MAX_MESSAGE_SIZE
    self.max_write_batch_size = MAX_WRITE_BATCH_SIZE
    self.supports_sessions = False
    self.is_mongos = False
    self.op_msg_enabled = False
    self.listeners = pool.opts.event_listeners
    self.enabled_for_cmap = pool.enabled_for_cmap
    self.compression_settings = pool.opts.compression_settings
    # Set up lazily when compression is negotiated.
    self.compression_context = None
    # The pool's pool_id changes with each reset() so we can close sockets
    # created before the last reset.
    self.pool_id = pool.pool_id
    self.ready = False
def __init__(
        self,
        address,
        ismaster=None,
        round_trip_time=None,
        error=None):
    """Snapshot one server's state as reported by an ismaster response."""
    self._address = address
    if not ismaster:
        # No response available: all fields take their Unknown defaults.
        ismaster = IsMaster({})

    self._server_type = ismaster.server_type
    self._all_hosts = ismaster.all_hosts
    self._tags = ismaster.tags
    self._replica_set_name = ismaster.replica_set_name
    self._primary = ismaster.primary
    self._max_bson_size = ismaster.max_bson_size
    self._max_message_size = ismaster.max_message_size
    self._max_write_batch_size = ismaster.max_write_batch_size
    self._min_wire_version = ismaster.min_wire_version
    self._max_wire_version = ismaster.max_wire_version
    self._set_version = ismaster.set_version
    self._election_id = ismaster.election_id
    self._is_writable = ismaster.is_writable
    self._is_readable = ismaster.is_readable
    self._round_trip_time = round_trip_time
    self._me = ismaster.me
    self._last_update_time = _time()
    self._error = error

    # Convert the server's last write datetime to epoch seconds.
    last_write = ismaster.last_write_date
    self._last_write_date = (
        _total_seconds(last_write - EPOCH_NAIVE) if last_write else None)
def _get_socket_no_auth(self): """Get or create a SocketInfo. Can raise ConnectionFailure.""" # We use the pid here to avoid issues with fork / multiprocessing. # See test.test_client:TestClient.test_fork for an example of # what could go wrong otherwise if self.pid != os.getpid(): self.reset() # Get a free socket or create one. if not self._socket_semaphore.acquire( True, self.opts.wait_queue_timeout): self._raise_wait_queue_timeout() # We've now acquired the semaphore and must release it on error. try: try: # set.pop() isn't atomic in Jython less than 2.7, see # http://bugs.jython.org/issue1854 with self.lock: sock_info, from_pool = self.sockets.pop(), True except KeyError: # Can raise ConnectionFailure or CertificateError. sock_info, from_pool = self.connect(), False if from_pool: # Can raise ConnectionFailure. sock_info = self._check(sock_info) except: self._socket_semaphore.release() raise sock_info.last_checkout = _time() return sock_info
def _check(self, sock_info):
    """Return a usable socket: ``sock_info`` itself, or a fresh
    replacement if the socket looks dead.

    Only probes the socket when the configured check interval has
    elapsed since the last checkout (or the interval is zero), to keep
    the overhead reasonable - we can't avoid AutoReconnects completely
    anyway.  If reconnecting fails, the ConnectionFailure propagates.
    """
    # How long since this socket was last checked out.
    age = _time() - sock_info.last_checkout
    interval = self._check_interval_seconds
    due = interval is not None and (0 == interval or age > interval)
    if due and socket_closed(sock_info.sock):
        sock_info.close()
        return self.connect()
    return sock_info
def _check(self, sock_info): """This side-effecty function checks if this socket has been idle for for longer than the max idle time, or if the socket has been closed by some external network error, and if so, attempts to create a new socket. If this connection attempt fails we raise the ConnectionFailure. Checking sockets lets us avoid seeing *some* :class:`~pymongo.errors.AutoReconnect` exceptions on server hiccups, etc. We only check if the socket was closed by an external error if it has been > 1 second since the socket was checked into the pool, to keep performance reasonable - we can't avoid AutoReconnects completely anyway. """ # How long since socket was last checked in. idle_time_seconds = _time() - sock_info.last_checkin # If socket is idle, open a new one. if (self.opts.max_idle_time_ms is not None and idle_time_seconds * 1000 > self.opts.max_idle_time_ms): sock_info.close() return self.connect() if (self._check_interval_seconds is not None and ( 0 == self._check_interval_seconds or idle_time_seconds > self._check_interval_seconds)): if self.socket_checker.socket_closed(sock_info.sock): sock_info.close() return self.connect() return sock_info
def __init__(self, address, ismaster=None, round_trip_time=None,
             error=None):
    """Record one server's state from an ismaster response snapshot."""
    self._address = address
    if not ismaster:
        # Without a response every attribute falls back to its
        # Unknown-server default.
        ismaster = IsMaster({})

    self._server_type = ismaster.server_type
    self._all_hosts = ismaster.all_hosts
    self._tags = ismaster.tags
    self._replica_set_name = ismaster.replica_set_name
    self._primary = ismaster.primary
    self._max_bson_size = ismaster.max_bson_size
    self._max_message_size = ismaster.max_message_size
    self._max_write_batch_size = ismaster.max_write_batch_size
    self._min_wire_version = ismaster.min_wire_version
    self._max_wire_version = ismaster.max_wire_version
    self._set_version = ismaster.set_version
    self._election_id = ismaster.election_id
    self._is_writable = ismaster.is_writable
    self._is_readable = ismaster.is_readable
    self._round_trip_time = round_trip_time
    self._me = ismaster.me
    self._last_update_time = _time()
    self._error = error

    # Translate the server's last write datetime into epoch seconds.
    last_write = ismaster.last_write_date
    self._last_write_date = (
        _total_seconds(last_write - EPOCH_NAIVE) if last_write else None)
def __init__(self, sock, pool, ismaster, address):
    """Record connection state plus limits learned from ``ismaster``.

    ``ismaster`` may be falsy (no handshake yet); server limits are
    then left unset.
    """
    self.sock = sock
    self.address = address
    self.authset = set()
    self.closed = False
    self.last_checkin = _time()

    if ismaster:
        self.is_writable = ismaster.is_writable
        self.max_wire_version = ismaster.max_wire_version
        self.max_bson_size = ismaster.max_bson_size
        self.max_message_size = ismaster.max_message_size
        self.max_write_batch_size = ismaster.max_write_batch_size
        self.supports_sessions = (
            ismaster.logical_session_timeout_minutes is not None)
        self.is_mongos = ismaster.server_type == SERVER_TYPE.Mongos
    else:
        self.is_writable = None
        self.max_wire_version = None
        self.max_bson_size = None
        self.max_message_size = MAX_MESSAGE_SIZE
        self.max_write_batch_size = None
        # Mirrors the original `ismaster and ...` expression, which
        # yields the falsy ismaster value itself here.
        self.supports_sessions = ismaster
        self.is_mongos = None

    self.listeners = pool.opts.event_listeners
    # The pool's pool_id changes with each reset() so we can close
    # sockets created before the last reset.
    self.pool_id = pool.pool_id
def _check_with_retry(self): """Call ismaster once or twice. Reset server's pool on error. Returns a ServerDescription. """ # According to the spec, if an ismaster call fails we reset the # server's pool. If a server was once connected, change its type # to Unknown only after retrying once. address = self._server_description.address retry = True metadata = None if self._server_description.server_type == SERVER_TYPE.Unknown: retry = False metadata = self._pool.opts.metadata start = _time() try: # If the server type is unknown, send metadata with first check. return self._check_once(metadata=metadata) except ReferenceError: raise except Exception as error: error_time = _time() - start if self._publish: self._listeners.publish_server_heartbeat_failed( address, error_time, error) self._topology.reset_pool(address) default = ServerDescription(address, error=error) if not retry: self._avg_round_trip_time.reset() # Server type defaults to Unknown. return default # Try a second and final time. If it fails return original error. # Always send metadata: this is a new connection. start = _time() try: return self._check_once(metadata=self._pool.opts.metadata) except ReferenceError: raise except Exception as error: error_time = _time() - start if self._publish: self._listeners.publish_server_heartbeat_failed( address, error_time, error) self._avg_round_trip_time.reset() return default
def _test_ops(self, client, *ops, **kwargs):
    """Run each op with an explicit session, then (unless
    ``explicit_only``) with an implicit one, verifying lsid handling.
    """
    listener = client.event_listeners()[0][0]

    for f, args, kw in ops:
        with client.start_session() as s:
            last_use = s._server_session.last_use
            start = _time()
            self.assertLessEqual(last_use, start)
            listener.results.clear()
            # In case "f" modifies its inputs.
            args = copy.copy(args)
            kw = copy.copy(kw)
            kw['session'] = s
            f(*args, **kw)
            self.assertGreaterEqual(s._server_session.last_use, start)
            self.assertGreaterEqual(len(listener.results['started']), 1)
            for event in listener.results['started']:
                self.assertTrue(
                    'lsid' in event.command,
                    "%s sent no lsid with %s" % (
                        f.__name__, event.command_name))
                self.assertEqual(
                    s.session_id,
                    event.command['lsid'],
                    "%s sent wrong lsid with %s" % (
                        f.__name__, event.command_name))
            self.assertFalse(s.has_ended)

        # Leaving the `with` ends the session; using it afterwards
        # must fail.
        self.assertTrue(s.has_ended)
        with self.assertRaisesRegex(InvalidOperation, "ended session"):
            f(*args, **kw)

    if kwargs.get('explicit_only'):
        return

    # No explicit session.
    for f, args, kw in ops:
        listener.results.clear()
        f(*args, **kw)
        self.assertGreaterEqual(len(listener.results['started']), 1)
        lsids = []
        for event in listener.results['started']:
            self.assertTrue(
                'lsid' in event.command,
                "%s sent no lsid with %s" % (
                    f.__name__, event.command_name))
            lsids.append(event.command['lsid'])

        if not (sys.platform.startswith('java') or 'PyPy' in sys.version):
            # Server session was returned to pool. Ignore interpreters with
            # non-deterministic GC.
            for lsid in lsids:
                self.assertIn(
                    lsid, session_ids(client),
                    "%s did not return implicit session to pool" % (
                        f.__name__,))
def _popen_wait(popen, timeout):
    """Implement wait timeout support for Python 2."""
    from pymongo.monotonic import time as _time
    deadline = _time() + timeout
    # Exponential backoff: 0.5ms doubled before each sleep (so the
    # first sleep is 1ms), capped at 500ms or the time remaining.
    delay = .0005
    rc = popen.poll()
    while rc is None:
        left = deadline - _time()
        if left <= 0:
            # Just return None instead of raising an error.
            return None
        delay = min(delay * 2, left, .5)
        time.sleep(delay)
        rc = popen.poll()
    return rc
def _check_with_socket(self, sock_info, metadata=None):
    """Return (IsMaster, round_trip_time).

    Can raise ConnectionFailure or OperationFailure.
    """
    cmd = SON([('ismaster', 1)])
    if metadata is not None:
        # First check on a new connection: include client metadata.
        cmd['client'] = metadata

    begun = _time()
    request_id, msg, max_doc_size = message.query(
        0, 'admin.$cmd', 0, -1, cmd, None, DEFAULT_CODEC_OPTIONS)
    # TODO: use sock_info.command()
    sock_info.send_message(msg, max_doc_size)
    response = sock_info.receive_message(request_id)
    return IsMaster(response.command_response()), _time() - begun
def _run(self):
    """Invoke self._target repeatedly until stopped or target fails."""
    while not self._stopped:
        try:
            if not self._target():
                # Target asked us to stop by returning falsy.
                self._stopped = True
                break
        except:
            self._stopped = True
            raise

        # Sleep in small increments so close()/wake() are noticed
        # quickly instead of blocking for the whole interval.
        deadline = _time() + self._interval
        while not self._stopped and _time() < deadline:
            time.sleep(self._min_interval)
            if self._event:
                break  # Early wake.

        self._event = False
def _check_with_socket(self, conn):
    """Return (Hello, round_trip_time).

    Can raise ConnectionFailure or OperationFailure.
    """
    cluster_time = self._topology.max_cluster_time()
    start = _time()
    if conn.more_to_come:
        # Read the next streaming hello (MongoDB 4.4+).
        response = IsMaster(conn._next_reply(), awaitable=True)
    elif (conn.performed_handshake
            and self._server_description.topology_version):
        # Initiate streaming hello (MongoDB 4.4+).
        response = conn._hello(
            cluster_time,
            self._server_description.topology_version,
            self._settings.heartbeat_frequency,
            None)
    else:
        # New connection handshake or polling hello (MongoDB <4.4).
        response = conn._hello(cluster_time, None, None, None)
    return response, _time() - start
def _check_with_retry(self): """Call ismaster once or twice. Reset server's pool on error. Returns a ServerDescription. """ # According to the spec, if an ismaster call fails we reset the # server's pool. If a server was once connected, change its type # to Unknown only after retrying once. address = self._server_description.address retry = self._server_description.server_type != SERVER_TYPE.Unknown start = _time() try: return self._check_once() except ReferenceError: raise except Exception as error: error_time = _time() - start self._topology.reset_pool(address) default = ServerDescription(address, error=error) if not retry: if self._publish: self._listeners.publish_server_heartbeat_failed( address, error_time, error) self._avg_round_trip_time.reset() # Server type defaults to Unknown. return default # Try a second and final time. If it fails return original error. start = _time() try: return self._check_once() except ReferenceError: raise except Exception as error: error_time = _time() - start if self._publish: self._listeners.publish_server_heartbeat_failed( address, error_time, error) self._avg_round_trip_time.reset() return default
def _check_with_socket(self, sock_info, metadata=None):
    """Return (IsMaster, round_trip_time).

    Can raise ConnectionFailure or OperationFailure.
    """
    cmd = SON([('ismaster', 1)])
    if metadata is not None:
        # New connection: include the client metadata document.
        cmd['client'] = metadata
    if self._server_description.max_wire_version >= 6:
        # Gossip $clusterTime on servers that support sessions.
        cluster_time = self._topology.max_cluster_time()
        if cluster_time is not None:
            cmd['$clusterTime'] = cluster_time

    begun = _time()
    request_id, msg, max_doc_size = message.query(
        0, 'admin.$cmd', 0, -1, cmd, None, DEFAULT_CODEC_OPTIONS)
    # TODO: use sock_info.command()
    sock_info.send_message(msg, max_doc_size)
    response = sock_info.receive_message(request_id)
    return IsMaster(response.command_response()), _time() - begun
def _run(self):
    """Invoke self._target repeatedly until told to stop or it fails."""
    while not self.__should_stop():
        try:
            if not self._target():
                # Target asked us to stop by returning falsy.
                self._stopped = True
                break
        except:
            # Record (under the lock) that this thread is exiting so a
            # new executor thread can be started, then re-raise.
            with self._lock:
                self._stopped = True
                self._thread_will_exit = True
            raise

        # Sleep in small increments so stop/wake requests are noticed
        # quickly instead of blocking for the whole interval.
        deadline = _time() + self._interval
        while not self._stopped and _time() < deadline:
            time.sleep(self._min_interval)
            if self._event:
                break  # Early wake.

        self._event = False
def wait_for(condition, predicate, timeout=None):
    """Wait on ``condition`` until ``predicate()`` is truthy.

    ``predicate`` is any callable whose result is interpreted as a
    boolean.  An optional ``timeout`` bounds the total wait.  Returns
    the last value ``predicate`` produced, so a falsy result means the
    wait timed out.
    """
    deadline = None
    remaining = timeout
    outcome = predicate()
    while not outcome:
        if remaining is not None:
            if deadline is None:
                # First pass: pin the absolute deadline.
                deadline = _time() + remaining
            else:
                remaining = deadline - _time()
                if remaining <= 0:
                    break
        condition.wait(remaining)
        outcome = predicate()
    return outcome
def remove_stale_sockets(self): if self.opts.max_idle_time_ms is not None: with self.lock: for sock_info in self.sockets.copy(): idle_time_ms = 1000 * _time() - sock_info.last_checkin if idle_time_ms > self.opts.max_idle_time_ms: self.sockets.remove(sock_info) sock_info.close() while len( self.sockets) + self.active_sockets < self.opts.min_pool_size: sock_info = self.connect() with self.lock: self.sockets.add(sock_info)
def _run(self):
    """Invoke self._target repeatedly until stopped or target fails."""
    while not self._stopped:
        try:
            if not self._target():
                # Target asked us to stop by returning falsy.
                self._stopped = True
                break
        except:
            self._stopped = True
            raise

        deadline = _time() + self._interval

        # Avoid running too frequently if wake() is called very often.
        time.sleep(self._min_interval)

        # Until the deadline, wake often to check if close() was called.
        while not self._stopped and _time() < deadline:
            # Our Event's wait returns True if set, else False.
            if self._event.wait(0.1):
                # Someone called wake().
                break

        self._event.clear()
def remove_stale_sockets(self): with self.lock: if self.opts.max_idle_time_ms is not None: for sock_info in self.sockets.copy(): age = _time() - sock_info.last_checkout if age > self.opts.max_idle_time_ms: self.sockets.remove(sock_info) sock_info.close() while len( self.sockets) + self.active_sockets < self.opts.min_pool_size: sock_info = self.connect() with self.lock: self.sockets.add(sock_info)
def return_socket(self, sock_info):
    """Return the socket to the pool, or if it's closed discard it."""
    if self.pid != os.getpid():
        # Forked since checkout: discard everything from the parent.
        self.reset()
    else:
        if sock_info.pool_id != self.pool_id:
            # Pool was reset while this socket was checked out.
            sock_info.close()
        elif not sock_info.closed:
            sock_info.last_checkin = _time()
            with self.lock:
                self.sockets.add(sock_info)

    # Always release the checkout slot and bookkeeping, even for a
    # discarded socket.
    self._socket_semaphore.release()
    with self.lock:
        self.active_sockets -= 1
def __init__(self, sock, pool, address):
    """Track per-connection state and server limits.

    :Parameters:
      - `sock`: the raw network socket.
      - `pool`: the owning connection pool.
      - `address`: the (host, port) of the server.
    """
    self.sock = sock
    self.address = address
    # Credentials already authenticated on this connection.
    self.authset = set()
    self.closed = False
    self.last_checkin_time = _time()
    self.performed_handshake = False
    # Conservative defaults until the handshake reports real limits.
    self.is_writable = False
    self.max_wire_version = MAX_WIRE_VERSION
    self.max_bson_size = MAX_BSON_SIZE
    self.max_message_size = MAX_MESSAGE_SIZE
    self.max_write_batch_size = MAX_WRITE_BATCH_SIZE
    self.supports_sessions = False
    self.is_mongos = False
    self.listeners = pool.opts.event_listeners
    # The pool's pool_id changes with each reset() so we can close sockets
    # created before the last reset.
    self.pool_id = pool.pool_id
def __init__(
        self,
        address,
        ismaster=None,
        round_trip_time=None,
        error=None):
    """Snapshot one server's state from an ismaster response or error."""
    self._address = address
    if not ismaster:
        # No response: every field takes its Unknown-server default.
        ismaster = IsMaster({})

    self._server_type = ismaster.server_type
    self._all_hosts = ismaster.all_hosts
    self._tags = ismaster.tags
    self._replica_set_name = ismaster.replica_set_name
    self._primary = ismaster.primary
    self._max_bson_size = ismaster.max_bson_size
    self._max_message_size = ismaster.max_message_size
    self._max_write_batch_size = ismaster.max_write_batch_size
    self._min_wire_version = ismaster.min_wire_version
    self._max_wire_version = ismaster.max_wire_version
    self._set_version = ismaster.set_version
    self._election_id = ismaster.election_id
    self._cluster_time = ismaster.cluster_time
    self._is_writable = ismaster.is_writable
    self._is_readable = ismaster.is_readable
    self._ls_timeout_minutes = ismaster.logical_session_timeout_minutes
    self._round_trip_time = round_trip_time
    self._me = ismaster.me
    self._last_update_time = _time()
    self._error = error

    # An error response with a details dict may carry a newer
    # topologyVersion than the ismaster response did.
    if error and hasattr(error, 'details') and isinstance(
            error.details, dict):
        self._topology_version = error.details.get('topologyVersion')
    else:
        self._topology_version = ismaster.topology_version

    # Convert the server's last write datetime to epoch seconds.
    last_write = ismaster.last_write_date
    self._last_write_date = (
        (last_write - EPOCH_NAIVE).total_seconds() if last_write
        else None)
def __init__(self, sock, pool, ismaster, address):
    """Record connection state plus limits learned from ``ismaster``.

    ``ismaster`` may be falsy (no handshake yet); the server limits
    are then left unset.
    """
    self.sock = sock
    self.address = address
    self.authset = set()
    self.closed = False
    self.last_checkout = _time()

    if ismaster:
        self.is_writable = ismaster.is_writable
        self.max_wire_version = ismaster.max_wire_version
        self.max_bson_size = ismaster.max_bson_size
        self.max_message_size = ismaster.max_message_size
        self.max_write_batch_size = ismaster.max_write_batch_size
        self.is_mongos = ismaster.server_type == SERVER_TYPE.Mongos
    else:
        self.is_writable = None
        self.max_wire_version = None
        self.max_bson_size = None
        self.max_message_size = None
        self.max_write_batch_size = None
        self.is_mongos = None

    # The pool's pool_id changes with each reset() so we can close
    # sockets created before the last reset.
    self.pool_id = pool.pool_id
def __init__(self, sock, pool, ismaster, address):
    """Record connection state plus limits learned from ``ismaster``.

    ``ismaster`` may be falsy (no handshake yet); the server limits
    are then left unset.
    """
    self.sock = sock
    self.address = address
    self.authset = set()
    self.closed = False
    self.last_checkout = _time()

    if ismaster:
        self.is_writable = ismaster.is_writable
        self.max_wire_version = ismaster.max_wire_version
        self.max_bson_size = ismaster.max_bson_size
        self.max_message_size = ismaster.max_message_size
        self.max_write_batch_size = ismaster.max_write_batch_size
        self.is_mongos = ismaster.server_type == SERVER_TYPE.Mongos
    else:
        self.is_writable = None
        self.max_wire_version = None
        self.max_bson_size = None
        self.max_message_size = None
        self.max_write_batch_size = None
        self.is_mongos = None

    # The pool's pool_id changes with each reset() so we can close
    # sockets created before the last reset.
    self.pool_id = pool.pool_id
def update_last_checkin_time(self):
    """Reset this connection's idle-time reference point to now."""
    self.last_checkin_time = _time()
def idle_time_seconds(self):
    """Seconds since this socket was last checked into its pool."""
    return _time() - self.last_checkin_time
def _test_ops(self, client, *ops):
    """Run each op with an explicit session, then with an implicit
    one, verifying lsid handling and session lifetime rules.
    """
    listener = client.event_listeners()[0][0]

    for f, args, kw in ops:
        with client.start_session() as s:
            last_use = s._server_session.last_use
            start = _time()
            self.assertLessEqual(last_use, start)
            listener.results.clear()
            # In case "f" modifies its inputs.
            args = copy.copy(args)
            kw = copy.copy(kw)
            kw['session'] = s
            f(*args, **kw)
            self.assertGreaterEqual(s._server_session.last_use, start)
            self.assertGreaterEqual(len(listener.results['started']), 1)
            for event in listener.results['started']:
                self.assertTrue(
                    'lsid' in event.command,
                    "%s sent no lsid with %s" % (
                        f.__name__, event.command_name))
                self.assertEqual(
                    s.session_id,
                    event.command['lsid'],
                    "%s sent wrong lsid with %s" % (
                        f.__name__, event.command_name))
            self.assertFalse(s.has_ended)

        # Leaving the `with` ends the session; using it afterwards
        # must fail.
        self.assertTrue(s.has_ended)
        with self.assertRaisesRegex(InvalidOperation, "ended session"):
            f(*args, **kw)

        # Test a session cannot be used on another client.
        with self.client2.start_session() as s:
            # In case "f" modifies its inputs.
            args = copy.copy(args)
            kw = copy.copy(kw)
            kw['session'] = s
            with self.assertRaisesRegex(
                    InvalidOperation,
                    'Can only use session with the MongoClient'
                    ' that started it'):
                f(*args, **kw)

    # No explicit session.
    for f, args, kw in ops:
        listener.results.clear()
        f(*args, **kw)
        self.assertGreaterEqual(len(listener.results['started']), 1)
        lsids = []
        for event in listener.results['started']:
            self.assertTrue(
                'lsid' in event.command,
                "%s sent no lsid with %s" % (
                    f.__name__, event.command_name))
            lsids.append(event.command['lsid'])

        if not (sys.platform.startswith('java') or 'PyPy' in sys.version):
            # Server session was returned to pool. Ignore interpreters with
            # non-deterministic GC.
            for lsid in lsids:
                self.assertIn(
                    lsid, session_ids(client),
                    "%s did not return implicit session to pool" % (
                        f.__name__,))
def _ping(self):
    """Run an "isMaster" command and return the RTT."""
    with self._pool.get_socket({}) as sock_info:
        begun = _time()
        sock_info.ismaster()
        elapsed = _time() - begun
    return elapsed