def setUp(self):
    super(TestReadPreference, self).setUp()

    members = [
        # primary
        {'tags': {'dc': 'ny', 'name': 'primary'}},
        # secondary
        {'tags': {'dc': 'la', 'name': 'secondary'}, 'priority': 0},
        # other_secondary
        {'tags': {'dc': 'ny', 'name': 'other_secondary'}, 'priority': 0},
    ]

    res = ha_tools.start_replica_set(members)
    self.seed, self.name = res

    primary = ha_tools.get_primary()
    self.primary = partition_node(primary)
    self.primary_tags = ha_tools.get_tags(primary)
    # Make sure priority worked
    self.assertEqual('primary', self.primary_tags['name'])

    self.primary_dc = {'dc': self.primary_tags['dc']}

    secondaries = ha_tools.get_secondaries()

    (secondary, ) = [
        s for s in secondaries
        if ha_tools.get_tags(s)['name'] == 'secondary']

    self.secondary = partition_node(secondary)
    self.secondary_tags = ha_tools.get_tags(secondary)
    self.secondary_dc = {'dc': self.secondary_tags['dc']}

    (other_secondary, ) = [
        s for s in secondaries
        if ha_tools.get_tags(s)['name'] == 'other_secondary']

    self.other_secondary = partition_node(other_secondary)
    self.other_secondary_tags = ha_tools.get_tags(other_secondary)
    self.other_secondary_dc = {'dc': self.other_secondary_tags['dc']}

    self.c = MongoClient(
        self.seed, replicaSet=self.name,
        serverSelectionTimeoutMS=self.server_selection_timeout)
    self.w = len(self.c.secondaries) + 1
    self.db = self.c.get_database("pymongo_test",
                                  write_concern=WriteConcern(w=self.w))
    self.db.test.delete_many({})
    self.db.test.insert_many([{'foo': i} for i in xrange(10)])

    self.clear_ping_times()
def setup_sync_cx(self):
    """Get a synchronous PyMongo MongoClient and determine SSL config."""
    host = os.environ.get("DB_IP", "localhost")
    port = int(os.environ.get("DB_PORT", 27017))
    connectTimeoutMS = 100
    serverSelectionTimeoutMS = 100
    try:
        client = connected(pymongo.MongoClient(
            host, port,
            connectTimeoutMS=connectTimeoutMS,
            serverSelectionTimeoutMS=serverSelectionTimeoutMS,
            ssl_ca_certs=CA_PEM, ssl=True))

        self.mongod_started_with_ssl = True
    except pymongo.errors.ServerSelectionTimeoutError:
        try:
            client = connected(pymongo.MongoClient(
                host, port,
                connectTimeoutMS=connectTimeoutMS,
                serverSelectionTimeoutMS=serverSelectionTimeoutMS,
                ssl_ca_certs=CA_PEM, ssl_certfile=CLIENT_PEM))

            self.mongod_started_with_ssl = True
            self.mongod_validates_client_cert = True
        except pymongo.errors.ServerSelectionTimeoutError:
            client = connected(pymongo.MongoClient(host, port))

    response = client.admin.command('ismaster')
    if 'setName' in response:
        self.is_replica_set = True
        self.rs_name = str(response['setName'])
        self.w = len(response['hosts'])
        self.hosts = set([partition_node(h) for h in response["hosts"]])
        host, port = self.primary = partition_node(response['primary'])

        self.arbiters = set([
            partition_node(h) for h in response.get("arbiters", [])])

        self.secondaries = [
            partition_node(m) for m in response['hosts']
            if m != self.primary and m not in self.arbiters]

    # Reconnect to found primary, without short timeouts.
    if self.mongod_started_with_ssl:
        client = connected(pymongo.MongoClient(
            host, port,
            ssl_ca_certs=CA_PEM,
            ssl_certfile=CLIENT_PEM))
    else:
        client = connected(pymongo.MongoClient(host, port, ssl=False))

    self.sync_cx = client
    self.host = host
    self.port = port
def got_app_error(topology, app_error):
    server_address = common.partition_node(app_error['address'])
    server = topology.get_server_by_address(server_address)
    error_type = app_error['type']
    generation = app_error.get(
        'generation', server.pool.gen.get_overall())
    when = app_error['when']
    max_wire_version = app_error['maxWireVersion']
    # XXX: We could get better test coverage by mocking the errors on the
    # Pool/SocketInfo.
    try:
        if error_type == 'command':
            _check_command_response(app_error['response'], max_wire_version)
            _check_write_command_response(app_error['response'])
        elif error_type == 'network':
            raise AutoReconnect('mock non-timeout network error')
        elif error_type == 'timeout':
            raise NetworkTimeout('mock network timeout error')
        else:
            raise AssertionError('unknown error type: %s' % (error_type,))
        assert False
    except (AutoReconnect, NotPrimaryError, OperationFailure) as e:
        if when == 'beforeHandshakeCompletes':
            completed_handshake = False
        elif when == 'afterHandshakeCompletes':
            completed_handshake = True
        else:
            assert False, 'Unknown when field %s' % (when,)

        topology.handle_error(
            server_address,
            _ErrorContext(e, max_wire_version, generation,
                          completed_handshake, None))
def check_outcome(self, topology, outcome):
    expected_servers = outcome['servers']

    # Check weak equality before proceeding.
    self.assertEqual(len(topology.description.server_descriptions()),
                     len(expected_servers))

    # Since lengths are equal, every actual server must have a corresponding
    # expected server.
    for expected_server_address, expected_server in expected_servers.items():
        node = common.partition_node(expected_server_address)
        self.assertTrue(topology.has_server(node))
        actual_server = topology.get_server_by_address(node)
        actual_server_description = actual_server.description

        if expected_server['type'] == 'PossiblePrimary':
            # Special case, some tests in the spec include the PossiblePrimary
            # type, but only single-threaded drivers need that type. We call
            # possible primaries Unknown.
            expected_server_type = SERVER_TYPE.Unknown
        else:
            expected_server_type = getattr(SERVER_TYPE, expected_server['type'])

        self.assertEqual(
            server_type_name(expected_server_type),
            server_type_name(actual_server_description.server_type))

        self.assertEqual(expected_server['setName'],
                         actual_server_description.replica_set_name)

    self.assertEqual(outcome['setName'],
                     topology.description.replica_set_name)

    expected_topology_type = getattr(TOPOLOGY_TYPE, outcome['topologyType'])
    self.assertEqual(topology_type_name(expected_topology_type),
                     topology_type_name(topology.description.topology_type))
def got_app_error(topology, app_error):
    server_address = common.partition_node(app_error['address'])
    server = topology.get_server_by_address(server_address)
    error_type = app_error['type']
    generation = app_error.get('generation', server.pool.generation)
    when = app_error['when']
    max_wire_version = app_error['maxWireVersion']
    # XXX: We could get better test coverage by mocking the errors on the
    # Pool/SocketInfo.
    try:
        if error_type == 'command':
            _check_command_response(app_error['response'])
        elif error_type == 'network':
            raise AutoReconnect('mock non-timeout network error')
        elif error_type == 'timeout':
            raise NetworkTimeout('mock network timeout error')
        else:
            raise AssertionError('unknown error type: %s' % (error_type,))
        assert False
    except (AutoReconnect, NotMasterError, OperationFailure) as e:
        if when == 'beforeHandshakeCompletes' and error_type == 'timeout':
            raise unittest.SkipTest('PYTHON-2211')

        topology.handle_error(
            server_address,
            _ErrorContext(e, max_wire_version, generation))
def setUpClass(cls):
    super(TestBulkWriteConcern, cls).setUpClass()
    cls.w = client_context.w
    cls.secondary = None
    if cls.w > 1:
        for member in client_context.hello['hosts']:
            if member != client_context.hello['primary']:
                cls.secondary = single_client(*partition_node(member))
                break
def check_outcome(self, topology, outcome):
    expected_servers = outcome['servers']

    # Check weak equality before proceeding.
    self.assertEqual(
        len(topology.description.server_descriptions()),
        len(expected_servers))

    if outcome.get('compatible') is False:
        with self.assertRaises(ConfigurationError):
            topology.description.check_compatible()
    else:
        # No error.
        topology.description.check_compatible()

    # Since lengths are equal, every actual server must have a corresponding
    # expected server.
    for expected_server_address, expected_server in expected_servers.items():
        node = common.partition_node(expected_server_address)
        self.assertTrue(topology.has_server(node))
        actual_server = topology.get_server_by_address(node)
        actual_server_description = actual_server.description
        expected_server_type = server_name_to_type(expected_server['type'])

        self.assertEqual(
            server_type_name(expected_server_type),
            server_type_name(actual_server_description.server_type))

        self.assertEqual(
            expected_server.get('setName'),
            actual_server_description.replica_set_name)

        self.assertEqual(
            expected_server.get('setVersion'),
            actual_server_description.set_version)

        self.assertEqual(
            expected_server.get('electionId'),
            actual_server_description.election_id)

        self.assertEqual(
            expected_server.get('topologyVersion'),
            actual_server_description.topology_version)

        expected_pool = expected_server.get('pool')
        if expected_pool:
            self.assertEqual(
                expected_pool.get('generation'),
                actual_server.pool.gen.get_overall())

    self.assertEqual(outcome['setName'],
                     topology.description.replica_set_name)
    self.assertEqual(outcome.get('logicalSessionTimeoutMinutes'),
                     topology.description.logical_session_timeout_minutes)

    expected_topology_type = getattr(TOPOLOGY_TYPE, outcome['topologyType'])
    self.assertEqual(topology_type_name(expected_topology_type),
                     topology_type_name(topology.description.topology_type))
def run_scenario(self):
    c = create_mock_topology(scenario_def['uri'])

    for phase in scenario_def['phases']:
        for response in phase['responses']:
            got_ismaster(c, common.partition_node(response[0]), response[1])

        check_outcome(self, c, phase['outcome'])
def check_outcome(self, topology, outcome):
    expected_servers = outcome['servers']

    # Check weak equality before proceeding.
    self.assertEqual(
        len(topology.description.server_descriptions()),
        len(expected_servers))

    if outcome.get('compatible') is False:
        with self.assertRaises(ConfigurationError):
            topology.description.check_compatible()
    else:
        # No error.
        topology.description.check_compatible()

    # Since lengths are equal, every actual server must have a corresponding
    # expected server.
    for expected_server_address, expected_server in expected_servers.items():
        node = common.partition_node(expected_server_address)
        self.assertTrue(topology.has_server(node))
        actual_server = topology.get_server_by_address(node)
        actual_server_description = actual_server.description

        if expected_server['type'] == 'PossiblePrimary':
            # Special case, some tests in the spec include the PossiblePrimary
            # type, but only single-threaded drivers need that type. We call
            # possible primaries Unknown.
            expected_server_type = SERVER_TYPE.Unknown
        else:
            expected_server_type = getattr(
                SERVER_TYPE, expected_server['type'])

        self.assertEqual(
            server_type_name(expected_server_type),
            server_type_name(actual_server_description.server_type))

        self.assertEqual(
            expected_server.get('setName'),
            actual_server_description.replica_set_name)

        self.assertEqual(
            expected_server.get('setVersion'),
            actual_server_description.set_version)

        self.assertEqual(
            expected_server.get('electionId'),
            actual_server_description.election_id)

    self.assertEqual(outcome['setName'],
                     topology.description.replica_set_name)
    self.assertEqual(outcome['logicalSessionTimeoutMinutes'],
                     topology.description.logical_session_timeout_minutes)

    expected_topology_type = getattr(TOPOLOGY_TYPE, outcome['topologyType'])
    self.assertEqual(topology_type_name(expected_topology_type),
                     topology_type_name(topology.description.topology_type))
def setUpClass(cls):
    super(TestReplicaSetClientBase, cls).setUpClass()
    cls.name = client_context.replica_set_name
    cls.w = client_context.w

    ismaster = client_context.ismaster
    cls.hosts = set(partition_node(h) for h in ismaster['hosts'])
    cls.arbiters = set(
        partition_node(h) for h in ismaster.get("arbiters", []))

    repl_set_status = client_context.client.admin.command(
        'replSetGetStatus')
    primary_info = [
        m for m in repl_set_status['members']
        if m['stateStr'] == 'PRIMARY'
    ][0]

    cls.primary = partition_node(primary_info['name'])
    cls.secondaries = set(
        partition_node(m['name']) for m in repl_set_status['members']
        if m['stateStr'] == 'SECONDARY')
def setUpClass(cls):
    super(TestReplicaSetClientBase, cls).setUpClass()
    cls.name = client_context.replica_set_name
    cls.w = client_context.w

    ismaster = client_context.ismaster
    cls.hosts = set(partition_node(h) for h in ismaster['hosts'])
    cls.arbiters = set(partition_node(h)
                       for h in ismaster.get("arbiters", []))

    repl_set_status = client_context.client.admin.command(
        'replSetGetStatus')
    primary_info = [
        m for m in repl_set_status['members']
        if m['stateStr'] == 'PRIMARY'
    ][0]

    cls.primary = partition_node(primary_info['name'])
    cls.secondaries = set(
        partition_node(m['name']) for m in repl_set_status['members']
        if m['stateStr'] == 'SECONDARY')
def test_recovering_member_triggers_refresh(self):
    # To test that find_one() and count() trigger immediate refreshes,
    # we'll create a separate client for each
    self.c_find_one, self.c_count = [
        MongoClient(
            self.seed,
            replicaSet=self.name,
            read_preference=SECONDARY,
            serverSelectionTimeoutMS=self.server_selection_timeout)
        for _ in xrange(2)]

    # We've started the primary and one secondary
    primary = ha_tools.get_primary()
    secondary = ha_tools.get_secondaries()[0]

    # Pre-condition: just make sure they all connected OK
    for c in self.c_find_one, self.c_count:
        wait_until(
            lambda: c.primary == partition_node(primary),
            'connect to the primary')

        wait_until(
            lambda: one(c.secondaries) == partition_node(secondary),
            'connect to the secondary')

    ha_tools.set_maintenance(secondary, True)

    # Trigger a refresh in various ways
    self.assertRaises(AutoReconnect, self.c_find_one.test.test.find_one)
    self.assertRaises(AutoReconnect, self.c_count.test.test.count)

    # Wait for the immediate refresh to complete - we're not waiting for
    # the periodic refresh, which has been disabled
    time.sleep(1)

    self.assertFalse(self.c_find_one.secondaries)
    self.assertEqual(partition_node(primary), self.c_find_one.primary)
    self.assertFalse(self.c_count.secondaries)
    self.assertEqual(partition_node(primary), self.c_count.primary)
def setUpClass(cls):
    super(TestBulkWriteConcern, cls).setUpClass()
    cls.w = client_context.w
    cls.secondary = None
    if cls.w > 1:
        for member in client_context.ismaster['hosts']:
            if member != client_context.ismaster['primary']:
                cls.secondary = MongoClient(*partition_node(member))
                break

    # We tested wtimeout errors by specifying a write concern greater than
    # the number of members, but in MongoDB 2.7.8+ this causes a different
    # sort of error, "Not enough data-bearing nodes". In recent servers we
    # use a failpoint to pause replication on a secondary.
    cls.need_replication_stopped = client_context.version.at_least(2, 7, 8)
def run_scenario(self):
    c = create_mock_topology(scenario_def['uri'])

    for i, phase in enumerate(scenario_def['phases']):
        # Including the phase description makes failures easier to debug.
        description = phase.get('description', str(i))
        with assertion_context('phase: %s' % (description,)):
            for response in phase.get('responses', []):
                got_ismaster(c, common.partition_node(response[0]),
                             response[1])

            for app_error in phase.get('applicationErrors', []):
                got_app_error(c, app_error)

            check_outcome(self, c, phase['outcome'])
def check_outcome(self, topology, outcome):
    expected_servers = outcome['servers']

    # Check weak equality before proceeding.
    self.assertEqual(
        len(topology.description.server_descriptions()),
        len(expected_servers))

    # Since lengths are equal, every actual server must have a corresponding
    # expected server.
    for expected_server_address, expected_server in expected_servers.items():
        node = common.partition_node(expected_server_address)
        self.assertTrue(topology.has_server(node))
        actual_server = topology.get_server_by_address(node)
        actual_server_description = actual_server.description

        if expected_server['type'] == 'PossiblePrimary':
            # Special case, some tests in the spec include the PossiblePrimary
            # type, but only single-threaded drivers need that type. We call
            # possible primaries Unknown.
            expected_server_type = SERVER_TYPE.Unknown
        else:
            expected_server_type = getattr(
                SERVER_TYPE, expected_server['type'])

        self.assertEqual(
            server_type_name(expected_server_type),
            server_type_name(actual_server_description.server_type))

        self.assertEqual(
            expected_server['setName'],
            actual_server_description.replica_set_name)

        self.assertEqual(
            expected_server.get('setVersion'),
            actual_server_description.set_version)

        self.assertEqual(
            expected_server.get('electionId'),
            actual_server_description.election_id)

    self.assertEqual(outcome['setName'],
                     topology.description.replica_set_name)

    expected_topology_type = getattr(TOPOLOGY_TYPE, outcome['topologyType'])
    self.assertEqual(topology_type_name(expected_topology_type),
                     topology_type_name(topology.description.topology_type))
def test_primary_stepdown(self):
    c = MongoClient(self.seed, replicaSet=self.name,
                    serverSelectionTimeoutMS=self.server_selection_timeout)
    wait_until(lambda: c.primary, "discover primary")
    wait_until(lambda: len(c.secondaries) == 2, "discover secondaries")
    ha_tools.stepdown_primary()

    # Wait for new primary.
    wait_until(lambda: (ha_tools.get_primary() and
                        c.primary == partition_node(ha_tools.get_primary())),
               "discover new primary",
               timeout=30)

    wait_until(lambda: len(c.secondaries) == 2,
               "discover new secondaries",
               timeout=30)
def test_monitor_removes_recovering_member(self):
    self.c = MongoClient(
        self.seed, replicaSet=self.name,
        serverSelectionTimeoutMS=self.server_selection_timeout)
    secondaries = ha_tools.get_secondaries()

    for mode in SECONDARY, SECONDARY_PREFERRED:
        partitioned_secondaries = partition_nodes(secondaries)
        utils.assertReadFromAll(self, self.c, partitioned_secondaries, mode)

    secondary, recovering_secondary = secondaries
    ha_tools.set_maintenance(recovering_secondary, True)
    time.sleep(2 * self.heartbeat_frequency)

    for mode in SECONDARY, SECONDARY_PREFERRED:
        # Don't read from recovering member
        utils.assertReadFrom(self, self.c, partition_node(secondary), mode)
def test_primary_stepdown(self):
    c = MongoClient(
        self.seed, replicaSet=self.name,
        serverSelectionTimeoutMS=self.server_selection_timeout)
    wait_until(lambda: c.primary, "discover primary")
    wait_until(lambda: len(c.secondaries) == 2, "discover secondaries")
    ha_tools.stepdown_primary()

    # Wait for new primary.
    wait_until(lambda: (ha_tools.get_primary() and
                        c.primary == partition_node(ha_tools.get_primary())),
               "discover new primary",
               timeout=30)

    wait_until(lambda: len(c.secondaries) == 2,
               "discover new secondaries",
               timeout=30)
def primary(self):
    """This server's opinion about who the primary is, or None."""
    if self._doc.get('primary'):
        return common.partition_node(self._doc['primary'])
    else:
        return None
def __init__(self):
    """Create a client and grab essential information from the server."""
    self.connected = False
    self.ismaster = {}
    self.w = None
    self.nodes = set()
    self.replica_set_name = None
    self.cmd_line = None
    self.version = Version(-1)  # Needs to be comparable with Version
    self.auth_enabled = False
    self.test_commands_enabled = False
    self.is_mongos = False
    self.is_rs = False
    self.has_ipv6 = False
    self.ssl = False
    self.ssl_cert_none = False
    self.ssl_certfile = False
    self.server_is_resolvable = is_server_resolvable()
    self.ssl_client_options = {}
    self.sessions_enabled = False

    self.client = _connect(host, port)

    if HAVE_SSL and not self.client:
        # Is MongoDB configured for SSL?
        self.client = _connect(host, port, **_SSL_OPTIONS)
        if self.client:
            self.ssl = True
            self.ssl_client_options = _SSL_OPTIONS
            self.ssl_certfile = True
            if _SSL_OPTIONS.get('ssl_cert_reqs') == ssl.CERT_NONE:
                self.ssl_cert_none = True

    if self.client:
        self.connected = True

        try:
            self.cmd_line = self.client.admin.command('getCmdLineOpts')
        except pymongo.errors.OperationFailure as e:
            msg = e.details.get('errmsg', '')
            if e.code == 13 or 'unauthorized' in msg or 'login' in msg:
                # Unauthorized.
                self.auth_enabled = True
            else:
                raise
        else:
            self.auth_enabled = self._server_started_with_auth()

        if self.auth_enabled:
            # See if db_user already exists.
            if not self._check_user_provided():
                self.client.admin.add_user(db_user, db_pwd, roles=['root'])

            self.client = _connect(host, port,
                                   username=db_user,
                                   password=db_pwd,
                                   replicaSet=self.replica_set_name,
                                   **self.ssl_client_options)

            # May not have this if OperationFailure was raised earlier.
            self.cmd_line = self.client.admin.command('getCmdLineOpts')

        self.ismaster = ismaster = self.client.admin.command('isMaster')
        self.sessions_enabled = 'logicalSessionTimeoutMinutes' in ismaster

        if 'setName' in ismaster:
            self.replica_set_name = ismaster['setName']
            self.is_rs = True
            if self.auth_enabled:
                # It doesn't matter which member we use as the seed here.
                self.client = pymongo.MongoClient(
                    host, port, username=db_user, password=db_pwd,
                    replicaSet=self.replica_set_name,
                    **self.ssl_client_options)
            else:
                self.client = pymongo.MongoClient(
                    host, port, replicaSet=self.replica_set_name,
                    **self.ssl_client_options)

            # Get the authoritative ismaster result from the primary.
            self.ismaster = self.client.admin.command('ismaster')
            nodes = [
                partition_node(node.lower())
                for node in self.ismaster.get('hosts', [])
            ]
            nodes.extend([
                partition_node(node.lower())
                for node in self.ismaster.get('passives', [])
            ])
            nodes.extend([
                partition_node(node.lower())
                for node in self.ismaster.get('arbiters', [])
            ])
            self.nodes = set(nodes)
        else:
            self.ismaster = ismaster
            self.nodes = set([(host, port)])

        self.w = len(self.ismaster.get("hosts", [])) or 1
        self.version = Version.from_client(self.client)

        if 'enableTestCommands=1' in self.cmd_line['argv']:
            self.test_commands_enabled = True
        elif 'parsed' in self.cmd_line:
            params = self.cmd_line['parsed'].get('setParameter', [])
            if 'enableTestCommands=1' in params:
                self.test_commands_enabled = True
            else:
                params = self.cmd_line['parsed'].get('setParameter', {})
                if params.get('enableTestCommands') == '1':
                    self.test_commands_enabled = True

        self.is_mongos = (self.ismaster.get('msg') == 'isdbgrid')
        self.has_ipv6 = self._server_started_with_ipv6()
def __init__(self):
    """Create a client and grab essential information from the server."""
    self.connected = False
    self.ismaster = {}
    self.w = None
    self.nodes = set()
    self.replica_set_name = None
    self.rs_client = None
    self.cmd_line = None
    self.version = Version(-1)  # Needs to be comparable with Version
    self.auth_enabled = False
    self.test_commands_enabled = False
    self.is_mongos = False
    self.is_rs = False
    self.has_ipv6 = False

    try:
        client = pymongo.MongoClient(host, port,
                                     serverSelectionTimeoutMS=100)
        client.admin.command('ismaster')  # Can we connect?

        # If so, then reset client to defaults.
        self.client = pymongo.MongoClient(host, port)
    except pymongo.errors.ConnectionFailure:
        self.client = None
    else:
        self.connected = True
        self.ismaster = self.client.admin.command('ismaster')
        self.w = len(self.ismaster.get("hosts", [])) or 1
        self.nodes = set([(host, port)])
        self.replica_set_name = self.ismaster.get('setName', '')
        self.rs_client = None
        self.version = Version.from_client(self.client)
        if self.replica_set_name:
            self.is_rs = True
            self.rs_client = pymongo.MongoClient(
                pair, replicaSet=self.replica_set_name)

            nodes = [partition_node(node)
                     for node in self.ismaster.get('hosts', [])]
            nodes.extend([partition_node(node)
                          for node in self.ismaster.get('passives', [])])
            nodes.extend([partition_node(node)
                          for node in self.ismaster.get('arbiters', [])])
            self.nodes = set(nodes)

        self.rs_or_standalone_client = self.rs_client or self.client

        try:
            self.cmd_line = self.client.admin.command('getCmdLineOpts')
        except pymongo.errors.OperationFailure as e:
            msg = e.details.get('errmsg', '')
            if e.code == 13 or 'unauthorized' in msg or 'login' in msg:
                # Unauthorized.
                self.auth_enabled = True
            else:
                raise
        else:
            self.auth_enabled = self._server_started_with_auth()

        if self.auth_enabled:
            # See if db_user already exists.
            self.user_provided = self._check_user_provided()
            if not self.user_provided:
                roles = {}
                if self.version.at_least(2, 5, 3, -1):
                    roles = {'roles': ['root']}
                self.client.admin.add_user(db_user, db_pwd, **roles)
            self.client.admin.authenticate(db_user, db_pwd)
            if self.rs_client:
                self.rs_client.admin.authenticate(db_user, db_pwd)

            # May not have this if OperationFailure was raised earlier.
            self.cmd_line = self.client.admin.command('getCmdLineOpts')

        if 'enableTestCommands=1' in self.cmd_line['argv']:
            self.test_commands_enabled = True
        elif 'parsed' in self.cmd_line:
            params = self.cmd_line['parsed'].get('setParameter', [])
            if 'enableTestCommands=1' in params:
                self.test_commands_enabled = True

        self.is_mongos = (self.ismaster.get('msg') == 'isdbgrid')
        self.has_ipv6 = self._server_started_with_ipv6()
def primary(self) -> Optional[Tuple[str, int]]:
    """This server's opinion about who the primary is, or None."""
    if self._doc.get("primary"):
        return common.partition_node(self._doc["primary"])
    else:
        return None
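# A minimal, self-contained sketch of how a hello/ismaster response wrapper
# like the property above might be exercised. The class name HelloDocSketch
# and the assumption that _doc holds the raw hello response dict are
# illustrative only; only pymongo.common.partition_node is taken from the
# real library.
from typing import Optional, Tuple

from pymongo import common


class HelloDocSketch:
    def __init__(self, doc):
        self._doc = doc  # raw hello/ismaster response document

    @property
    def primary(self) -> Optional[Tuple[str, int]]:
        """This server's opinion about who the primary is, or None."""
        if self._doc.get("primary"):
            return common.partition_node(self._doc["primary"])
        return None


# Example (hypothetical document):
# HelloDocSketch({'primary': 'db1.example.com:27017'}).primary
# == ('db1.example.com', 27017)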
def __init__(self):
    """Create a client and grab essential information from the server."""
    self.connected = False
    self.ismaster = {}
    self.w = None
    self.nodes = set()
    self.replica_set_name = None
    self.rs_client = None
    self.cmd_line = None
    self.version = Version(-1)  # Needs to be comparable with Version
    self.auth_enabled = False
    self.test_commands_enabled = False
    self.is_mongos = False
    self.is_rs = False
    self.has_ipv6 = False

    try:
        client = pymongo.MongoClient(host, port,
                                     serverSelectionTimeoutMS=100)
        client.admin.command('ismaster')  # Can we connect?

        # If so, then reset client to defaults.
        self.client = pymongo.MongoClient(host, port)
    except pymongo.errors.ConnectionFailure:
        self.client = self.rs_or_standalone_client = None
    else:
        self.connected = True
        self.ismaster = self.client.admin.command('ismaster')
        self.w = len(self.ismaster.get("hosts", [])) or 1
        self.nodes = set([(host, port)])
        self.replica_set_name = self.ismaster.get('setName', '')
        self.rs_client = None
        self.version = Version.from_client(self.client)
        if self.replica_set_name:
            self.is_rs = True
            self.rs_client = pymongo.MongoClient(
                pair, replicaSet=self.replica_set_name)

            nodes = [
                partition_node(node.lower())
                for node in self.ismaster.get('hosts', [])
            ]
            nodes.extend([
                partition_node(node.lower())
                for node in self.ismaster.get('passives', [])
            ])
            nodes.extend([
                partition_node(node.lower())
                for node in self.ismaster.get('arbiters', [])
            ])
            self.nodes = set(nodes)

        self.rs_or_standalone_client = self.rs_client or self.client

        try:
            self.cmd_line = self.client.admin.command('getCmdLineOpts')
        except pymongo.errors.OperationFailure as e:
            msg = e.details.get('errmsg', '')
            if e.code == 13 or 'unauthorized' in msg or 'login' in msg:
                # Unauthorized.
                self.auth_enabled = True
            else:
                raise
        else:
            self.auth_enabled = self._server_started_with_auth()

        if self.auth_enabled:
            # See if db_user already exists.
            self.user_provided = self._check_user_provided()
            if not self.user_provided:
                roles = {}
                if self.version.at_least(2, 5, 3, -1):
                    roles = {'roles': ['root']}
                self.client.admin.add_user(db_user, db_pwd, **roles)
            self.client.admin.authenticate(db_user, db_pwd)
            if self.rs_client:
                self.rs_client.admin.authenticate(db_user, db_pwd)

            # May not have this if OperationFailure was raised earlier.
            self.cmd_line = self.client.admin.command('getCmdLineOpts')

        if 'enableTestCommands=1' in self.cmd_line['argv']:
            self.test_commands_enabled = True
        elif 'parsed' in self.cmd_line:
            params = self.cmd_line['parsed'].get('setParameter', [])
            if 'enableTestCommands=1' in params:
                self.test_commands_enabled = True

        self.is_mongos = (self.ismaster.get('msg') == 'isdbgrid')
        self.has_ipv6 = self._server_started_with_ipv6()
def partition_nodes(nodes):
    """Translate from ['host:port', ...] to [(host, port), ...]"""
    return [partition_node(node) for node in nodes]
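# For reference, a minimal sketch of the partition_node() helper these
# snippets rely on, assuming the pymongo.common semantics: split a
# 'host:port' string into a (host, int(port)) tuple, default the port to
# 27017, and strip IPv6 brackets. The real helper lives in pymongo.common;
# the standalone name partition_node_sketch is only for illustration.
def partition_node_sketch(node):
    """Split 'host:port' into (host, port), defaulting the port to 27017."""
    host = node
    port = 27017
    idx = node.rfind(':')
    if idx != -1:
        host, port = node[:idx], int(node[idx + 1:])
    if host.startswith('['):
        # '[::1]:27017' -> '::1'
        host = host[1:-1]
    return host, port


# Examples:
# partition_node_sketch('localhost:27018') == ('localhost', 27018)
# partition_node_sketch('[::1]:27017') == ('::1', 27017)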
def __init__(self):
    """Create a client and grab essential information from the server."""
    # Seed host. This may be updated further down.
    self.host, self.port = host, port
    self.connected = False
    self.ismaster = {}
    self.w = None
    self.nodes = set()
    self.replica_set_name = None
    self.rs_client = None
    self.cmd_line = None
    self.version = Version(-1)  # Needs to be comparable with Version
    self.auth_enabled = False
    self.test_commands_enabled = False
    self.is_mongos = False
    self.is_rs = False
    self.has_ipv6 = False
    self.ssl_cert_none = False
    self.ssl_certfile = False
    self.server_is_resolvable = is_server_resolvable()
    self.client = self.rs_or_standalone_client = None

    def connect(**kwargs):
        try:
            client = pymongo.MongoClient(self.host, self.port,
                                         serverSelectionTimeoutMS=100,
                                         **kwargs)
            client.admin.command('ismaster')  # Can we connect?

            # If connected, then return client with default timeout
            return pymongo.MongoClient(self.host, self.port, **kwargs)
        except pymongo.errors.ConnectionFailure:
            return None

    self.client = connect()

    if HAVE_SSL and not self.client:
        # Is MongoDB configured for SSL?
        self.client = connect(ssl=True, ssl_cert_reqs=ssl.CERT_NONE)
        if self.client:
            self.ssl_cert_none = True
            # Can client connect with certfile?
            client = connect(
                ssl=True,
                ssl_cert_reqs=ssl.CERT_NONE,
                ssl_certfile=CLIENT_PEM,
            )
            if client:
                self.ssl_certfile = True
                self.client = client

    if self.client:
        self.connected = True
        self.ismaster = self.client.admin.command('ismaster')
        self.w = len(self.ismaster.get("hosts", [])) or 1
        self.nodes = set([(self.host, self.port)])
        self.replica_set_name = self.ismaster.get('setName', '')
        self.rs_client = None
        self.version = Version.from_client(self.client)
        if self.replica_set_name:
            self.is_rs = True
            self.rs_client = pymongo.MongoClient(
                self.ismaster['primary'],
                replicaSet=self.replica_set_name)
            # Force connection
            self.rs_client.admin.command('ismaster')
            self.host, self.port = self.rs_client.primary
            self.client = connect()

            nodes = [
                partition_node(node.lower())
                for node in self.ismaster.get('hosts', [])
            ]
            nodes.extend([
                partition_node(node.lower())
                for node in self.ismaster.get('passives', [])
            ])
            nodes.extend([
                partition_node(node.lower())
                for node in self.ismaster.get('arbiters', [])
            ])
            self.nodes = set(nodes)

        self.rs_or_standalone_client = self.rs_client or self.client

        try:
            self.cmd_line = self.client.admin.command('getCmdLineOpts')
        except pymongo.errors.OperationFailure as e:
            msg = e.details.get('errmsg', '')
            if e.code == 13 or 'unauthorized' in msg or 'login' in msg:
                # Unauthorized.
                self.auth_enabled = True
            else:
                raise
        else:
            self.auth_enabled = self._server_started_with_auth()

        if self.auth_enabled:
            # See if db_user already exists.
            self.user_provided = self._check_user_provided()
            if not self.user_provided:
                roles = {}
                if self.version.at_least(2, 5, 3, -1):
                    roles = {'roles': ['root']}
                self.client.admin.add_user(db_user, db_pwd, **roles)
            self.client.admin.authenticate(db_user, db_pwd)
            if self.rs_client:
                self.rs_client.admin.authenticate(db_user, db_pwd)

            # May not have this if OperationFailure was raised earlier.
            self.cmd_line = self.client.admin.command('getCmdLineOpts')

        if 'enableTestCommands=1' in self.cmd_line['argv']:
            self.test_commands_enabled = True
        elif 'parsed' in self.cmd_line:
            params = self.cmd_line['parsed'].get('setParameter', [])
            if 'enableTestCommands=1' in params:
                self.test_commands_enabled = True

        self.is_mongos = (self.ismaster.get('msg') == 'isdbgrid')
        self.has_ipv6 = self._server_started_with_ipv6()

    # Do this after we connect so we know who the primary is.
    self.pair = "%s:%d" % (self.host, self.port)
def _init_client(self):
    self.client = self._connect(host, port)

    if self.client is not None:
        # Return early when connected to dataLake as mongohoused does not
        # support the getCmdLineOpts command and is tested without TLS.
        build_info = self.client.admin.command('buildInfo')
        if 'dataLake' in build_info:
            self.is_data_lake = True
            self.auth_enabled = True
            self.client = self._connect(host, port, username=db_user,
                                        password=db_pwd)
            self.connected = True
            return

    if HAVE_SSL and not self.client:
        # Is MongoDB configured for SSL?
        self.client = self._connect(host, port, **TLS_OPTIONS)
        if self.client:
            self.tls = True
            self.default_client_options.update(TLS_OPTIONS)
            self.ssl_certfile = True

    if self.client:
        self.connected = True

        try:
            self.cmd_line = self.client.admin.command('getCmdLineOpts')
        except pymongo.errors.OperationFailure as e:
            msg = e.details.get('errmsg', '')
            if e.code == 13 or 'unauthorized' in msg or 'login' in msg:
                # Unauthorized.
                self.auth_enabled = True
            else:
                raise
        else:
            self.auth_enabled = self._server_started_with_auth()

        if self.auth_enabled:
            # See if db_user already exists.
            if not self._check_user_provided():
                _create_user(self.client.admin, db_user, db_pwd)

            self.client = self._connect(
                host, port, username=db_user, password=db_pwd,
                replicaSet=self.replica_set_name,
                **self.default_client_options)

            # May not have this if OperationFailure was raised earlier.
            self.cmd_line = self.client.admin.command('getCmdLineOpts')

        self.server_status = self.client.admin.command('serverStatus')
        if self.storage_engine == "mmapv1":
            # MMAPv1 does not support retryWrites=True.
            self.default_client_options['retryWrites'] = False

        ismaster = self.ismaster
        self.sessions_enabled = 'logicalSessionTimeoutMinutes' in ismaster

        if 'setName' in ismaster:
            self.replica_set_name = str(ismaster['setName'])
            self.is_rs = True
            if self.auth_enabled:
                # It doesn't matter which member we use as the seed here.
                self.client = pymongo.MongoClient(
                    host, port, username=db_user, password=db_pwd,
                    replicaSet=self.replica_set_name,
                    **self.default_client_options)
            else:
                self.client = pymongo.MongoClient(
                    host, port, replicaSet=self.replica_set_name,
                    **self.default_client_options)

            # Get the authoritative ismaster result from the primary.
            ismaster = self.ismaster
            nodes = [
                partition_node(node.lower())
                for node in ismaster.get('hosts', [])
            ]
            nodes.extend([
                partition_node(node.lower())
                for node in ismaster.get('passives', [])
            ])
            nodes.extend([
                partition_node(node.lower())
                for node in ismaster.get('arbiters', [])
            ])
            self.nodes = set(nodes)
        else:
            self.nodes = set([(host, port)])
        self.w = len(ismaster.get("hosts", [])) or 1
        self.version = Version.from_client(self.client)

        self.server_parameters = self.client.admin.command(
            'getParameter', '*')

        if 'enableTestCommands=1' in self.cmd_line['argv']:
            self.test_commands_enabled = True
        elif 'parsed' in self.cmd_line:
            params = self.cmd_line['parsed'].get('setParameter', [])
            if 'enableTestCommands=1' in params:
                self.test_commands_enabled = True
            else:
                params = self.cmd_line['parsed'].get('setParameter', {})
                if params.get('enableTestCommands') == '1':
                    self.test_commands_enabled = True

        self.is_mongos = (self.ismaster.get('msg') == 'isdbgrid')
        self.has_ipv6 = self._server_started_with_ipv6()

        if self.is_mongos:
            # Check for another mongos on the next port.
            address = self.client.address
            next_address = address[0], address[1] + 1
            self.mongoses.append(address)
            mongos_client = self._connect(*next_address,
                                          **self.default_client_options)
            if mongos_client:
                ismaster = mongos_client.admin.command('ismaster')
                if ismaster.get('msg') == 'isdbgrid':
                    self.mongoses.append(next_address)
def __init__(self):
    """Create a client and grab essential information from the server."""
    self.connected = False
    self.ismaster = {}
    self.w = None
    self.nodes = set()
    self.replica_set_name = None
    self.cmd_line = None
    self.version = Version(-1)  # Needs to be comparable with Version
    self.auth_enabled = False
    self.test_commands_enabled = False
    self.is_mongos = False
    self.is_rs = False
    self.has_ipv6 = False
    self.ssl = False
    self.ssl_cert_none = False
    self.ssl_certfile = False
    self.server_is_resolvable = is_server_resolvable()
    self.ssl_client_options = {}

    self.client = _connect(host, port)

    if HAVE_SSL and not self.client:
        # Is MongoDB configured for SSL?
        self.client = _connect(host, port, **_SSL_OPTIONS)
        if self.client:
            self.ssl = True
            self.ssl_client_options = _SSL_OPTIONS
            self.ssl_certfile = True
            if _SSL_OPTIONS.get('ssl_cert_reqs') == ssl.CERT_NONE:
                self.ssl_cert_none = True

    if self.client:
        self.connected = True
        ismaster = self.client.admin.command('ismaster')
        if 'setName' in ismaster:
            self.replica_set_name = ismaster['setName']
            self.is_rs = True
            # It doesn't matter which member we use as the seed here.
            self.client = pymongo.MongoClient(
                host, port, replicaSet=self.replica_set_name,
                **self.ssl_client_options)

            # Get the authoritative ismaster result from the primary.
            self.ismaster = self.client.admin.command('ismaster')
            nodes = [partition_node(node.lower())
                     for node in self.ismaster.get('hosts', [])]
            nodes.extend([partition_node(node.lower())
                          for node in self.ismaster.get('passives', [])])
            nodes.extend([partition_node(node.lower())
                          for node in self.ismaster.get('arbiters', [])])
            self.nodes = set(nodes)
        else:
            self.ismaster = ismaster
            self.nodes = set([(host, port)])

        self.w = len(self.ismaster.get("hosts", [])) or 1
        self.version = Version.from_client(self.client)

        try:
            self.cmd_line = self.client.admin.command('getCmdLineOpts')
        except pymongo.errors.OperationFailure as e:
            msg = e.details.get('errmsg', '')
            if e.code == 13 or 'unauthorized' in msg or 'login' in msg:
                # Unauthorized.
                self.auth_enabled = True
            else:
                raise
        else:
            self.auth_enabled = self._server_started_with_auth()

        if self.auth_enabled:
            # See if db_user already exists.
            self.user_provided = self._check_user_provided()
            if not self.user_provided:
                roles = {}
                if self.version.at_least(2, 5, 3, -1):
                    roles = {'roles': ['root']}
                self.client.admin.add_user(db_user, db_pwd, **roles)
            self.client.admin.authenticate(db_user, db_pwd)

            # May not have this if OperationFailure was raised earlier.
            self.cmd_line = self.client.admin.command('getCmdLineOpts')

        if 'enableTestCommands=1' in self.cmd_line['argv']:
            self.test_commands_enabled = True
        elif 'parsed' in self.cmd_line:
            params = self.cmd_line['parsed'].get('setParameter', [])
            if 'enableTestCommands=1' in params:
                self.test_commands_enabled = True
            else:
                params = self.cmd_line['parsed'].get('setParameter', {})
                if params.get('enableTestCommands') == '1':
                    self.test_commands_enabled = True

        self.is_mongos = (self.ismaster.get('msg') == 'isdbgrid')
        self.has_ipv6 = self._server_started_with_ipv6()
def _init_client(self):
    self.client = self._connect(host, port)

    if HAVE_SSL and not self.client:
        # Is MongoDB configured for SSL?
        self.client = self._connect(host, port, **_SSL_OPTIONS)
        if self.client:
            self.ssl = True
            self.default_client_options.update(_SSL_OPTIONS)
            self.ssl_certfile = True
            if _SSL_OPTIONS.get('ssl_cert_reqs') == ssl.CERT_NONE:
                self.ssl_cert_none = True

    if self.client:
        self.connected = True

        try:
            self.cmd_line = self.client.admin.command('getCmdLineOpts')
        except pymongo.errors.OperationFailure as e:
            msg = e.details.get('errmsg', '')
            if e.code == 13 or 'unauthorized' in msg or 'login' in msg:
                # Unauthorized.
                self.auth_enabled = True
            else:
                raise
        else:
            self.auth_enabled = self._server_started_with_auth()

        if self.auth_enabled:
            # See if db_user already exists.
            if not self._check_user_provided():
                _create_user(self.client.admin, db_user, db_pwd)

            self.client = self._connect(
                host, port, username=db_user, password=db_pwd,
                replicaSet=self.replica_set_name,
                **self.default_client_options)

            # May not have this if OperationFailure was raised earlier.
            self.cmd_line = self.client.admin.command('getCmdLineOpts')

        self.server_status = self.client.admin.command('serverStatus')

        self.ismaster = ismaster = self.client.admin.command('isMaster')
        self.sessions_enabled = 'logicalSessionTimeoutMinutes' in ismaster

        if 'setName' in ismaster:
            self.replica_set_name = str(ismaster['setName'])
            self.is_rs = True
            if self.auth_enabled:
                # It doesn't matter which member we use as the seed here.
                self.client = pymongo.MongoClient(
                    host, port, username=db_user, password=db_pwd,
                    replicaSet=self.replica_set_name,
                    **self.default_client_options)
            else:
                self.client = pymongo.MongoClient(
                    host, port, replicaSet=self.replica_set_name,
                    **self.default_client_options)

            # Get the authoritative ismaster result from the primary.
            self.ismaster = self.client.admin.command('ismaster')
            nodes = [partition_node(node.lower())
                     for node in self.ismaster.get('hosts', [])]
            nodes.extend([partition_node(node.lower())
                          for node in self.ismaster.get('passives', [])])
            nodes.extend([partition_node(node.lower())
                          for node in self.ismaster.get('arbiters', [])])
            self.nodes = set(nodes)
        else:
            self.ismaster = ismaster
            self.nodes = set([(host, port)])

        self.w = len(self.ismaster.get("hosts", [])) or 1
        self.version = Version.from_client(self.client)

        if 'enableTestCommands=1' in self.cmd_line['argv']:
            self.test_commands_enabled = True
        elif 'parsed' in self.cmd_line:
            params = self.cmd_line['parsed'].get('setParameter', [])
            if 'enableTestCommands=1' in params:
                self.test_commands_enabled = True
            else:
                params = self.cmd_line['parsed'].get('setParameter', {})
                if params.get('enableTestCommands') == '1':
                    self.test_commands_enabled = True

        self.is_mongos = (self.ismaster.get('msg') == 'isdbgrid')
        self.has_ipv6 = self._server_started_with_ipv6()

        if self.is_mongos:
            # Check for another mongos on the next port.
            address = self.client.address
            next_address = address[0], address[1] + 1
            self.mongoses.append(address)
            mongos_client = self._connect(*next_address,
                                          **self.default_client_options)
            if mongos_client:
                ismaster = mongos_client.admin.command('ismaster')
                if ismaster.get('msg') == 'isdbgrid':
                    self.mongoses.append(next_address)
def __init__(self):
    """Create a client and grab essential information from the server."""
    # Seed host. This may be updated further down.
    self.host, self.port = host, port
    self.connected = False
    self.ismaster = {}
    self.w = None
    self.nodes = set()
    self.replica_set_name = None
    self.cmd_line = None
    self.version = Version(-1)  # Needs to be comparable with Version
    self.auth_enabled = False
    self.test_commands_enabled = False
    self.is_mongos = False
    self.is_rs = False
    self.has_ipv6 = False
    self.ssl = False
    self.ssl_cert_none = False
    self.ssl_certfile = False
    self.server_is_resolvable = is_server_resolvable()
    self.ssl_client_options = {}

    self.client = _connect(self.host, self.port)

    if HAVE_SSL and not self.client:
        # Is MongoDB configured for SSL?
        self.client = _connect(self.host, self.port, **_SSL_OPTIONS)
        if self.client:
            self.ssl = True
            self.ssl_client_options = _SSL_OPTIONS
            self.ssl_certfile = True
            if _SSL_OPTIONS.get('ssl_cert_reqs') == ssl.CERT_NONE:
                self.ssl_cert_none = True

    if self.client:
        self.connected = True
        self.ismaster = self.client.admin.command('ismaster')
        self.w = len(self.ismaster.get("hosts", [])) or 1
        self.nodes = set([(self.host, self.port)])
        self.replica_set_name = self.ismaster.get('setName', '')
        self.version = Version.from_client(self.client)
        if self.replica_set_name:
            self.is_rs = True
            self.client = pymongo.MongoClient(
                self.ismaster['primary'],
                replicaSet=self.replica_set_name,
                **self.ssl_client_options)
            # Force connection
            self.client.admin.command('ismaster')
            self.host, self.port = self.client.primary

            nodes = [partition_node(node.lower())
                     for node in self.ismaster.get('hosts', [])]
            nodes.extend([partition_node(node.lower())
                          for node in self.ismaster.get('passives', [])])
            nodes.extend([partition_node(node.lower())
                          for node in self.ismaster.get('arbiters', [])])
            self.nodes = set(nodes)

        try:
            self.cmd_line = self.client.admin.command('getCmdLineOpts')
        except pymongo.errors.OperationFailure as e:
            msg = e.details.get('errmsg', '')
            if e.code == 13 or 'unauthorized' in msg or 'login' in msg:
                # Unauthorized.
                self.auth_enabled = True
            else:
                raise
        else:
            self.auth_enabled = self._server_started_with_auth()

        if self.auth_enabled:
            # See if db_user already exists.
            self.user_provided = self._check_user_provided()
            if not self.user_provided:
                roles = {}
                if self.version.at_least(2, 5, 3, -1):
                    roles = {'roles': ['root']}
                self.client.admin.add_user(db_user, db_pwd, **roles)
            self.client.admin.authenticate(db_user, db_pwd)

            # May not have this if OperationFailure was raised earlier.
            self.cmd_line = self.client.admin.command('getCmdLineOpts')

        if 'enableTestCommands=1' in self.cmd_line['argv']:
            self.test_commands_enabled = True
        elif 'parsed' in self.cmd_line:
            params = self.cmd_line['parsed'].get('setParameter', [])
            if 'enableTestCommands=1' in params:
                self.test_commands_enabled = True
            else:
                params = self.cmd_line['parsed'].get('setParameter', {})
                if params.get('enableTestCommands') == '1':
                    self.test_commands_enabled = True

        self.is_mongos = (self.ismaster.get('msg') == 'isdbgrid')
        self.has_ipv6 = self._server_started_with_ipv6()