class TestRollbacks(unittest.TestCase):

    def tearDown(self):
        close_client(self.primary_conn)
        close_client(self.secondary_conn)
        self.repl_set.stop()

    def setUp(self):
        # Create a new oplog progress file
        try:
            os.unlink("oplog.timestamp")
        except OSError:
            pass
        open("oplog.timestamp", "w").close()

        # Start a replica set
        self.repl_set = ReplicaSet().start()
        # Connection to the replica set as a whole
        self.main_conn = self.repl_set.client()
        # Connection to the primary specifically
        self.primary_conn = self.repl_set.primary.client()
        # Connection to the secondary specifically
        self.secondary_conn = self.repl_set.secondary.client(
            read_preference=ReadPreference.SECONDARY_PREFERRED)

        # Wipe any test data
        self.main_conn["test"]["mc"].drop()

        # Oplog thread
        doc_manager = DocManager()
        oplog_progress = LockingDict()
        self.opman = OplogThread(
            primary_client=self.main_conn,
            doc_managers=(doc_manager,),
            oplog_progress_dict=oplog_progress,
            ns_set=["test.mc"]
        )

    def test_single_target(self):
        """Test with a single replication target"""

        self.opman.start()

        # Insert first document with primary up
        self.main_conn["test"]["mc"].insert_one({"i": 0})
        self.assertEqual(self.primary_conn["test"]["mc"].find().count(), 1)

        # Make sure the insert is replicated
        secondary = self.secondary_conn
        assert_soon(lambda: secondary["test"]["mc"].count() == 1,
                    "first write didn't replicate to secondary")

        # Kill the primary
        self.repl_set.primary.stop(destroy=False)

        # Wait for the secondary to be promoted
        assert_soon(lambda: secondary["admin"].command("isMaster")["ismaster"])

        # Insert another document. This will be rolled back later
        retry_until_ok(self.main_conn["test"]["mc"].insert_one, {"i": 1})
        self.assertEqual(secondary["test"]["mc"].count(), 2)

        # Wait for replication to doc manager
        assert_soon(lambda: len(self.opman.doc_managers[0]._search()) == 2,
                    "not all writes were replicated to doc manager")

        # Kill the new primary
        self.repl_set.secondary.stop(destroy=False)

        # Start both servers back up
        self.repl_set.primary.start()
        primary_admin = self.primary_conn["admin"]
        assert_soon(lambda: primary_admin.command("isMaster")["ismaster"],
                    "restarted primary never resumed primary status")
        self.repl_set.secondary.start()
        assert_soon(lambda: retry_until_ok(secondary.admin.command,
                                           'replSetGetStatus')['myState'] == 2,
                    "restarted secondary never resumed secondary status")
        assert_soon(lambda:
                    retry_until_ok(self.main_conn.test.mc.find().count) > 0,
                    "documents not found after primary/secondary restarted")

        # Only the first document should exist in MongoDB
        self.assertEqual(self.main_conn["test"]["mc"].count(), 1)
        self.assertEqual(self.main_conn["test"]["mc"].find_one()["i"], 0)

        # The same should hold for the doc manager
        doc_manager = self.opman.doc_managers[0]
        assert_soon(lambda: len(doc_manager._search()) == 1,
                    'documents never rolled back in doc manager.')
        self.assertEqual(doc_manager._search()[0]["i"], 0)

        # cleanup
        self.opman.join()

    def test_many_targets(self):
        """Test with several replication targets"""

        # OplogThread has multiple doc managers
        doc_managers = [DocManager(), DocManager(), DocManager()]
        self.opman.doc_managers = doc_managers

        self.opman.start()

        # Insert a document
        self.main_conn["test"]["mc"].insert_one({"i": 0})
        self.assertEqual(self.primary_conn["test"]["mc"].count(), 1)

        # Make sure the insert is replicated
        secondary = self.secondary_conn
        assert_soon(lambda: secondary["test"]["mc"].count() == 1,
                    "first write didn't replicate to secondary")

        # Kill the primary
        self.repl_set.primary.stop(destroy=False)

        # Wait for the secondary to be promoted
        assert_soon(lambda: secondary.admin.command("isMaster")['ismaster'],
                    'secondary was never promoted')

        # Insert more documents. These will be rolled back later.
        # Some of these documents will be manually removed from
        # certain doc managers, to emulate the effect of certain
        # target systems being ahead/behind others.
        secondary_ids = []
        for i in range(1, 10):
            secondary_ids.append(
                retry_until_ok(self.main_conn["test"]["mc"].insert_one,
                               {"i": i}).inserted_id)
        self.assertEqual(self.secondary_conn["test"]["mc"].count(), 10)

        # Wait for replication to the doc managers
        def docmans_done():
            for dm in self.opman.doc_managers:
                if len(dm._search()) != 10:
                    return False
            return True
        assert_soon(docmans_done,
                    "not all writes were replicated to doc managers")

        # Remove some documents from the doc managers to simulate
        # uneven replication
        ts = self.opman.doc_managers[0].get_last_doc()['_ts']
        for doc_id in secondary_ids[8:]:
            self.opman.doc_managers[1].remove(doc_id, 'test.mc', ts)
        for doc_id in secondary_ids[2:]:
            self.opman.doc_managers[2].remove(doc_id, 'test.mc', ts)

        # Kill the new primary
        self.repl_set.secondary.stop(destroy=False)

        # Start both servers back up
        self.repl_set.primary.start()
        primary_admin = self.primary_conn["admin"]
        assert_soon(lambda: primary_admin.command("isMaster")['ismaster'],
                    'restarted primary never resumed primary status')
        self.repl_set.secondary.start()
        assert_soon(lambda: retry_until_ok(secondary.admin.command,
                                           'replSetGetStatus')['myState'] == 2,
                    "restarted secondary never resumed secondary status")
        assert_soon(lambda: retry_until_ok(
            self.primary_conn.test.mc.find().count) > 0,
            "documents not found after primary/secondary restarted")

        # Only the first document should exist in MongoDB
        self.assertEqual(self.primary_conn["test"]["mc"].count(), 1)
        self.assertEqual(self.primary_conn["test"]["mc"].find_one()["i"], 0)

        # Give OplogThread some time to catch up
        time.sleep(10)

        # The same should hold for the doc managers
        for dm in self.opman.doc_managers:
            self.assertEqual(len(dm._search()), 1)
            self.assertEqual(dm._search()[0]["i"], 0)

        self.opman.join()

    def test_deletions(self):
        """Test rolling back 'd' operations"""

        self.opman.start()

        # Insert documents, wait until they replicate to the secondary
        self.main_conn["test"]["mc"].insert_one({"i": 0})
        self.main_conn["test"]["mc"].insert_one({"i": 1})
        self.assertEqual(self.primary_conn["test"]["mc"].find().count(), 2)
        assert_soon(lambda: self.secondary_conn["test"]["mc"].count() == 2,
                    "first write didn't replicate to secondary")

        # Kill the primary, wait for secondary to be promoted
        self.repl_set.primary.stop(destroy=False)
        assert_soon(lambda: self.secondary_conn["admin"]
                    .command("isMaster")["ismaster"])

        # Delete first document
        retry_until_ok(self.main_conn["test"]["mc"].delete_one, {"i": 0})
        self.assertEqual(self.secondary_conn["test"]["mc"].count(), 1)

        # Wait for replication to doc manager
        assert_soon(lambda: len(self.opman.doc_managers[0]._search()) == 1,
                    "delete was not replicated to doc manager")

        # Kill the new primary
        self.repl_set.secondary.stop(destroy=False)

        # Start both servers back up
        self.repl_set.primary.start()
        primary_admin = self.primary_conn["admin"]
        assert_soon(lambda: primary_admin.command("isMaster")["ismaster"],
                    "restarted primary never resumed primary status")
        self.repl_set.secondary.start()
        assert_soon(lambda: retry_until_ok(self.secondary_conn.admin.command,
                                           'replSetGetStatus')['myState'] == 2,
                    "restarted secondary never resumed secondary status")

        # Both documents should exist in mongo
        assert_soon(lambda: retry_until_ok(
            self.main_conn["test"]["mc"].count) == 2)

        # Both documents should exist in the doc manager
        doc_manager = self.opman.doc_managers[0]
        assert_soon(lambda: len(list(doc_manager._search())) == 2,
                    ("Expected two documents, but got: %r"
                     % list(doc_manager._search())))

        self.opman.join()

    def test_stressed_rollback(self):
        """Stress test for a rollback with many documents."""
        self.opman.start()

        c = self.main_conn.test.mc
        docman = self.opman.doc_managers[0]
        c2 = c.with_options(write_concern=WriteConcern(w=2))
        c2.insert_many([{'i': i} for i in range(STRESS_COUNT)])
        assert_soon(lambda: c2.count() == STRESS_COUNT)
        condition = lambda: len(docman._search()) == STRESS_COUNT
        assert_soon(condition, ("Was expecting %d documents in DocManager, "
                                "but %d found instead."
                                % (STRESS_COUNT, len(docman._search()))))

        primary_conn = self.repl_set.primary.client()
        self.repl_set.primary.stop(destroy=False)

        new_primary_conn = self.repl_set.secondary.client()
        admin = new_primary_conn.admin
        assert_soon(
            lambda: retry_until_ok(admin.command, "isMaster")['ismaster'])

        retry_until_ok(c.insert_many,
                       [{'i': str(STRESS_COUNT + i)}
                        for i in range(STRESS_COUNT)])

        self.repl_set.secondary.stop(destroy=False)

        self.repl_set.primary.start()
        admin = primary_conn.admin
        assert_soon(
            lambda: retry_until_ok(admin.command, "isMaster")['ismaster'])
        self.repl_set.secondary.start()

        assert_soon(lambda: retry_until_ok(c.count) == STRESS_COUNT)
        assert_soon(condition, ("Was expecting %d documents in DocManager, "
                                "but %d found instead."
                                % (STRESS_COUNT, len(docman._search()))))

        self.opman.join()
class TestOplogManager(unittest.TestCase):
    """Defines all the testing methods, as well as a method that sets up the
    cluster
    """

    def setUp(self):
        self.repl_set = ReplicaSet().start()
        self.primary_conn = self.repl_set.client()
        self.oplog_coll = self.primary_conn.local['oplog.rs']
        self.opman = OplogThread(
            primary_client=self.primary_conn,
            doc_managers=(DocManager(),),
            oplog_progress_dict=LockingDict()
        )

    def tearDown(self):
        try:
            self.opman.join()
        except RuntimeError:
            pass  # OplogThread may not have been started
        self.primary_conn.drop_database("test")
        close_client(self.primary_conn)
        self.repl_set.stop()

    def test_get_oplog_cursor(self):
        """Test the get_oplog_cursor method"""

        # timestamp is None - all oplog entries are returned.
        cursor = self.opman.get_oplog_cursor(None)
        self.assertEqual(cursor.count(),
                         self.primary_conn["local"]["oplog.rs"].count())

        # earliest entry is the only one at/after timestamp
        doc = {"ts": bson.Timestamp(1000, 0), "i": 1}
        self.primary_conn["test"]["test"].insert_one(doc)
        latest_timestamp = self.opman.get_last_oplog_timestamp()
        cursor = self.opman.get_oplog_cursor(latest_timestamp)
        self.assertNotEqual(cursor, None)
        self.assertEqual(cursor.count(), 1)
        next_entry_id = next(cursor)['o']['_id']
        retrieved = self.primary_conn.test.test.find_one(next_entry_id)
        self.assertEqual(retrieved, doc)

        # many entries before and after timestamp
        self.primary_conn["test"]["test"].insert_many(
            [{"i": i} for i in range(2, 1002)])
        oplog_cursor = self.oplog_coll.find(
            {'op': {'$ne': 'n'},
             'ns': {'$not': re.compile(r'\.(system|\$cmd)')}},
            sort=[("ts", pymongo.ASCENDING)]
        )

        # initial insert + 1000 more inserts
        self.assertEqual(oplog_cursor.count(), 1 + 1000)
        pivot = oplog_cursor.skip(400).limit(-1)[0]
        goc_cursor = self.opman.get_oplog_cursor(pivot["ts"])
        self.assertEqual(goc_cursor.count(), 1 + 1000 - 400)

    def test_get_last_oplog_timestamp(self):
        """Test the get_last_oplog_timestamp method"""

        # "empty" the oplog
        self.opman.oplog = self.primary_conn["test"]["emptycollection"]
        self.assertEqual(self.opman.get_last_oplog_timestamp(), None)

        # Test non-empty oplog
        self.opman.oplog = self.primary_conn["local"]["oplog.rs"]
        for i in range(1000):
            self.primary_conn["test"]["test"].insert_one({"i": i + 500})
        oplog = self.primary_conn["local"]["oplog.rs"]
        oplog = oplog.find().sort("$natural", pymongo.DESCENDING).limit(-1)[0]
        self.assertEqual(self.opman.get_last_oplog_timestamp(), oplog["ts"])

    def test_dump_collection(self):
        """Test the dump_collection method

        Cases:

        1. empty oplog
        2. non-empty oplog
        """

        # Test with empty oplog
        self.opman.oplog = self.primary_conn["test"]["emptycollection"]
        last_ts = self.opman.dump_collection()
        self.assertEqual(last_ts, None)

        # Test with non-empty oplog
        self.opman.oplog = self.primary_conn["local"]["oplog.rs"]
        for i in range(1000):
            self.primary_conn["test"]["test"].insert_one({"i": i + 500})
        last_ts = self.opman.get_last_oplog_timestamp()
        self.assertEqual(last_ts, self.opman.dump_collection())
        self.assertEqual(len(self.opman.doc_managers[0]._search()), 1000)

    def test_dump_collection_with_error(self):
        """Test the dump_collection method with invalid documents.

        Cases:

        1. non-empty oplog, continue_on_error=True, invalid documents
        """

        # non-empty oplog, continue_on_error=True, invalid documents
        self.opman.continue_on_error = True
        self.opman.oplog = self.primary_conn["local"]["oplog.rs"]

        docs = [{'a': i} for i in range(100)]
        for i in range(50, 60):
            docs[i]['_upsert_exception'] = True
        self.primary_conn['test']['test'].insert_many(docs)

        last_ts = self.opman.get_last_oplog_timestamp()
        self.assertEqual(last_ts, self.opman.dump_collection())
        docs = self.opman.doc_managers[0]._search()
        docs.sort(key=lambda doc: doc['a'])

        self.assertEqual(len(docs), 90)
        expected_a = itertools.chain(range(0, 50), range(60, 100))
        for doc, correct_a in zip(docs, expected_a):
            self.assertEqual(doc['a'], correct_a)

    def test_init_cursor(self):
        """Test the init_cursor method

        Cases:

        1. no last checkpoint, no collection dump
        2. no last checkpoint, collection dump ok and stuff to dump
        3. no last checkpoint, nothing to dump, stuff in oplog
        4. no last checkpoint, nothing to dump, nothing in oplog
        5. no last checkpoint, no collection dump, stuff in oplog
        6. last checkpoint exists
        7. last checkpoint is behind
        """

        # N.B. these sub-cases build off of each other and cannot be
        # re-ordered without side-effects

        # No last checkpoint, no collection dump, nothing in oplog
        # "change oplog collection" to put nothing in oplog
        self.opman.oplog = self.primary_conn["test"]["emptycollection"]
        self.opman.collection_dump = False
        self.assertTrue(all(doc['op'] == 'n'
                            for doc in self.opman.init_cursor()[0]))
        self.assertEqual(self.opman.checkpoint, None)

        # No last checkpoint, empty collections, nothing in oplog
        self.opman.collection_dump = True
        cursor, cursor_len = self.opman.init_cursor()
        self.assertEqual(cursor, None)
        self.assertEqual(cursor_len, 0)
        self.assertEqual(self.opman.checkpoint, None)

        # No last checkpoint, empty collections, something in oplog
        self.opman.oplog = self.primary_conn['local']['oplog.rs']
        collection = self.primary_conn["test"]["test"]
        collection.insert_one({"i": 1})
        collection.delete_one({"i": 1})
        time.sleep(3)
        last_ts = self.opman.get_last_oplog_timestamp()
        cursor, cursor_len = self.opman.init_cursor()
        self.assertEqual(cursor_len, 0)
        self.assertEqual(self.opman.checkpoint, last_ts)
        with self.opman.oplog_progress as prog:
            self.assertEqual(prog.get_dict()[str(self.opman.oplog)], last_ts)

        # No last checkpoint, no collection dump, something in oplog
        self.opman.oplog_progress = LockingDict()
        self.opman.collection_dump = False
        collection.insert_one({"i": 2})
        last_ts = self.opman.get_last_oplog_timestamp()
        cursor, cursor_len = self.opman.init_cursor()
        for i in range(cursor_len - 1):
            next(cursor)
        self.assertEqual(next(cursor)['o']['i'], 2)
        self.assertEqual(self.opman.checkpoint, last_ts)

        # Last checkpoint exists
        progress = LockingDict()
        self.opman.oplog_progress = progress
        for i in range(1000):
            collection.insert_one({"i": i + 500})
        entry = list(
            self.primary_conn["local"]["oplog.rs"].find(skip=200, limit=-2))
        progress.get_dict()[str(self.opman.oplog)] = entry[0]["ts"]
        self.opman.oplog_progress = progress
        self.opman.checkpoint = None
        cursor, cursor_len = self.opman.init_cursor()
        self.assertEqual(next(cursor)["ts"], entry[1]["ts"])
        self.assertEqual(self.opman.checkpoint, entry[0]["ts"])
        with self.opman.oplog_progress as prog:
            self.assertEqual(prog.get_dict()[str(self.opman.oplog)],
                             entry[0]["ts"])

        # Last checkpoint is behind
        progress = LockingDict()
        progress.get_dict()[str(self.opman.oplog)] = bson.Timestamp(1, 0)
        self.opman.oplog_progress = progress
        self.opman.checkpoint = None
        cursor, cursor_len = self.opman.init_cursor()
        self.assertEqual(cursor_len, 0)
        self.assertEqual(cursor, None)
        self.assertIsNotNone(self.opman.checkpoint)

    def test_filter_fields(self):
        docman = self.opman.doc_managers[0]
        conn = self.opman.primary_client

        include_fields = ["a", "b", "c"]
        exclude_fields = ["d", "e", "f"]

        # Set fields to care about
        self.opman.fields = include_fields
        # Documents have more than just these fields
        doc = {
            "a": 1, "b": 2, "c": 3,
            "d": 4, "e": 5, "f": 6,
            "_id": 1
        }
        db = conn['test']['test']
        db.insert_one(doc)
        assert_soon(lambda: db.count() == 1)
        self.opman.dump_collection()

        result = docman._search()[0]
        keys = result.keys()
        for inc, exc in zip(include_fields, exclude_fields):
            self.assertIn(inc, keys)
            self.assertNotIn(exc, keys)

    def test_namespace_mapping(self):
        """Test mapping of namespaces

        Cases:

        upsert/delete/update of documents:
        1. in namespace set, mapping provided
        2. outside of namespace set, mapping provided
        """

        source_ns = ["test.test1", "test.test2"]
        phony_ns = ["test.phony1", "test.phony2"]
        dest_mapping = {"test.test1": "test.test1_dest",
                        "test.test2": "test.test2_dest"}
        self.opman.dest_mapping = dest_mapping
        self.opman.namespace_set = source_ns
        docman = self.opman.doc_managers[0]
        # start replicating
        self.opman.start()

        base_doc = {"_id": 1, "name": "superman"}

        # doc in namespace set
        for ns in source_ns:
            db, coll = ns.split(".", 1)

            # test insert
            self.primary_conn[db][coll].insert_one(base_doc)
            assert_soon(lambda: len(docman._search()) == 1)
            self.assertEqual(docman._search()[0]["ns"], dest_mapping[ns])
            bad = [d for d in docman._search() if d["ns"] == ns]
            self.assertEqual(len(bad), 0)

            # test update
            self.primary_conn[db][coll].update_one(
                {"_id": 1},
                {"$set": {"weakness": "kryptonite"}}
            )

            def update_complete():
                docs = docman._search()
                for d in docs:
                    if d.get("weakness") == "kryptonite":
                        return True
                return False
            assert_soon(update_complete)
            self.assertEqual(docman._search()[0]["ns"], dest_mapping[ns])
            bad = [d for d in docman._search() if d["ns"] == ns]
            self.assertEqual(len(bad), 0)

            # test delete
            self.primary_conn[db][coll].delete_one({"_id": 1})
            assert_soon(lambda: len(docman._search()) == 0)
            bad = [d for d in docman._search()
                   if d["ns"] == dest_mapping[ns]]
            self.assertEqual(len(bad), 0)

            # cleanup
            self.primary_conn[db][coll].delete_many({})
            self.opman.doc_managers[0]._delete()

        # doc not in namespace set
        for ns in phony_ns:
            db, coll = ns.split(".", 1)

            # test insert
            self.primary_conn[db][coll].insert_one(base_doc)
            time.sleep(1)
            self.assertEqual(len(docman._search()), 0)

            # test update
            self.primary_conn[db][coll].update_one(
                {"_id": 1},
                {"$set": {"weakness": "kryptonite"}}
            )
            time.sleep(1)
            self.assertEqual(len(docman._search()), 0)

    def test_many_targets(self):
        """Test that one OplogThread is capable of replicating to more than
        one target.
        """
        doc_managers = [DocManager(), DocManager(), DocManager()]
        self.opman.doc_managers = doc_managers

        # start replicating
        self.opman.start()
        self.primary_conn["test"]["test"].insert_one({
            "name": "kermit",
            "color": "green"
        })
        self.primary_conn["test"]["test"].insert_one({
            "name": "elmo",
            "color": "firetruck red"
        })

        assert_soon(
            lambda: sum(len(d._search()) for d in doc_managers) == 6,
            "OplogThread should be able to replicate to multiple targets")

        self.primary_conn["test"]["test"].delete_one({"name": "elmo"})

        assert_soon(
            lambda: sum(len(d._search()) for d in doc_managers) == 3,
            "OplogThread should be able to replicate to multiple targets")
        for d in doc_managers:
            self.assertEqual(d._search()[0]["name"], "kermit")

    def test_filter_oplog_entry(self):
        # Test oplog entries: these are callables, since
        # filter_oplog_entry modifies the oplog entry in-place
        insert_op = lambda: {
            "op": "i",
            "o": {
                "_id": 0,
                "a": 1,
                "b": 2,
                "c": 3
            }
        }
        update_op = lambda: {
            "op": "u",
            "o": {
                "$set": {
                    "a": 4,
                    "b": 5
                },
                "$unset": {
                    "c": True
                }
            },
            "o2": {
                "_id": 1
            }
        }

        # Case 0: insert op, no fields provided
        self.opman.fields = None
        filtered = self.opman.filter_oplog_entry(insert_op())
        self.assertEqual(filtered, insert_op())

        # Case 1: insert op, fields provided
        self.opman.fields = ['a', 'b']
        filtered = self.opman.filter_oplog_entry(insert_op())
        self.assertEqual(filtered['o'], {'_id': 0, 'a': 1, 'b': 2})

        # Case 2: insert op, fields provided, doc becomes empty except for _id
        self.opman.fields = ['d', 'e', 'f']
        filtered = self.opman.filter_oplog_entry(insert_op())
        self.assertEqual(filtered['o'], {'_id': 0})

        # Case 3: update op, no fields provided
        self.opman.fields = None
        filtered = self.opman.filter_oplog_entry(update_op())
        self.assertEqual(filtered, update_op())

        # Case 4: update op, fields provided
        self.opman.fields = ['a', 'c']
        filtered = self.opman.filter_oplog_entry(update_op())
        self.assertNotIn('b', filtered['o']['$set'])
        self.assertIn('a', filtered['o']['$set'])
        self.assertEqual(filtered['o']['$unset'], update_op()['o']['$unset'])

        # Case 5: update op, fields provided, empty $set
        self.opman.fields = ['c']
        filtered = self.opman.filter_oplog_entry(update_op())
        self.assertNotIn('$set', filtered['o'])
        self.assertEqual(filtered['o']['$unset'], update_op()['o']['$unset'])

        # Case 6: update op, fields provided, empty $unset
        self.opman.fields = ['a', 'b']
        filtered = self.opman.filter_oplog_entry(update_op())
        self.assertNotIn('$unset', filtered['o'])
        self.assertEqual(filtered['o']['$set'], update_op()['o']['$set'])

        # Case 7: update op, fields provided, entry is nullified
        self.opman.fields = ['d', 'e', 'f']
        filtered = self.opman.filter_oplog_entry(update_op())
        self.assertEqual(filtered, None)

        # Case 8: update op, fields provided, replacement
        self.opman.fields = ['a', 'b', 'c']
        filtered = self.opman.filter_oplog_entry({
            'op': 'u',
            'o': {'a': 1, 'b': 2, 'c': 3, 'd': 4}
        })
        self.assertEqual(
            filtered, {'op': 'u', 'o': {'a': 1, 'b': 2, 'c': 3}})
class TestCommandReplication(unittest.TestCase):
    def setUp(self):
        self.repl_set = ReplicaSet().start()
        self.primary_conn = self.repl_set.client()
        self.oplog_progress = LockingDict()
        self.opman = None

    def tearDown(self):
        try:
            if self.opman:
                self.opman.join()
        except RuntimeError:
            pass
        close_client(self.primary_conn)
        self.repl_set.stop()

    def initOplogThread(self, namespace_set=[], dest_mapping={}):
        self.docman = CommandLoggerDocManager()
        self.docman.command_helper = CommandHelper(namespace_set, dest_mapping)
        self.opman = OplogThread(
            primary_client=self.primary_conn,
            doc_managers=(self.docman,),
            oplog_progress_dict=self.oplog_progress,
            ns_set=namespace_set,
            dest_mapping=dest_mapping,
            collection_dump=False
        )
        self.opman.start()

    def test_command_helper(self):
        # Databases cannot be merged
        mapping = {
            'a.x': 'c.x',
            'b.x': 'c.y'
        }
        self.assertRaises(errors.MongoConnectorError,
                          CommandHelper,
                          list(mapping), mapping)

        mapping = {
            'a.x': 'b.x',
            'a.y': 'c.y'
        }
        helper = CommandHelper(list(mapping) + ['a.z'], mapping)

        self.assertEqual(set(helper.map_db('a')), set(['a', 'b', 'c']))
        self.assertEqual(helper.map_db('d'), [])

        self.assertEqual(helper.map_namespace('a.x'), 'b.x')
        self.assertEqual(helper.map_namespace('a.z'), 'a.z')
        self.assertEqual(helper.map_namespace('d.x'), None)

        self.assertEqual(helper.map_collection('a', 'x'), ('b', 'x'))
        self.assertEqual(helper.map_collection('a', 'z'), ('a', 'z'))
        self.assertEqual(helper.map_collection('d', 'x'), (None, None))

    def test_create_collection(self):
        self.initOplogThread()
        pymongo.collection.Collection(
            self.primary_conn['test'], 'test', create=True)
        assert_soon(lambda: self.docman.commands)
        self.assertEqual(self.docman.commands[0], {'create': 'test'})

    def test_create_collection_skipped(self):
        self.initOplogThread(['test.test'])

        pymongo.collection.Collection(
            self.primary_conn['test2'], 'test2', create=True)
        pymongo.collection.Collection(
            self.primary_conn['test'], 'test', create=True)

        assert_soon(lambda: self.docman.commands)
        self.assertEqual(len(self.docman.commands), 1)
        self.assertEqual(self.docman.commands[0], {'create': 'test'})

    def test_drop_collection(self):
        self.initOplogThread()
        coll = pymongo.collection.Collection(
            self.primary_conn['test'], 'test', create=True)
        coll.drop()
        assert_soon(lambda: len(self.docman.commands) == 2)
        self.assertEqual(self.docman.commands[1], {'drop': 'test'})

    def test_drop_database(self):
        self.initOplogThread()
        pymongo.collection.Collection(
            self.primary_conn['test'], 'test', create=True)
        self.primary_conn.drop_database('test')
        assert_soon(lambda: len(self.docman.commands) == 2)
        self.assertEqual(self.docman.commands[1], {'dropDatabase': 1})

    def test_rename_collection(self):
        self.initOplogThread()
        coll = pymongo.collection.Collection(
            self.primary_conn['test'], 'test', create=True)
        coll.rename('test2')
        assert_soon(lambda: len(self.docman.commands) == 2)
        self.assertEqual(
            self.docman.commands[1].get('renameCollection'), 'test.test')
        self.assertEqual(
            self.docman.commands[1].get('to'), 'test.test2')
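
# ---------------------------------------------------------------------------
# Reference sketch: the stub doc manager assumed by TestCommandReplication.
# ``CommandLoggerDocManager`` only needs to record the command documents the
# OplogThread hands it, so the tests can assert on ``docman.commands``. The
# stand-in below is illustrative (hence the ``Sketch`` suffix): the method
# names follow the DocManager API, where ``handle_command`` receives
# 'c'-type oplog entries, but the canonical class is the one imported by
# this module.
# ---------------------------------------------------------------------------

class CommandLoggerDocManagerSketch(object):
    """Doc manager stand-in that logs commands instead of applying them."""

    def __init__(self, url=None, **kwargs):
        self.commands = []

    def stop(self):
        pass

    def upsert(self, doc, namespace, timestamp):
        pass

    def remove(self, document_id, namespace, timestamp):
        pass

    def commit(self):
        pass

    def handle_command(self, doc, namespace, timestamp):
        # The tests assert on the raw command documents, e.g.
        # {'create': 'test'} or {'renameCollection': ..., 'to': ...}.
        self.commands.append(doc)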