def test_single_target(self):
    """Test with a single replication target"""
    self.opman.start()

    # Write one document while the primary is still up and verify
    # that both replica-set members have seen it.
    mc_coll = self.main_conn["test"]["mc"]
    mc_coll.insert({"i": 0})
    self.assertEqual(self.primary_conn["test"]["mc"].find().count(), 1)
    rs_secondary = self.secondary_conn
    assert_soon(lambda: rs_secondary["test"]["mc"].count() == 1,
                "first write didn't replicate to secondary")

    # Take down the primary and wait for the secondary's election.
    kill_mongo_proc(self.primary_p, destroy=False)
    assert_soon(
        lambda: rs_secondary["admin"].command("isMaster")["ismaster"])

    # This write reaches only the new primary; it will be rolled back.
    retry_until_ok(mc_coll.insert, {"i": 1})
    self.assertEqual(rs_secondary["test"]["mc"].count(), 2)
    assert_soon(lambda: len(self.opman.doc_managers[0]._search()) == 2,
                "not all writes were replicated to doc manager")

    # Take down the new primary as well, then bring both nodes back.
    kill_mongo_proc(self.secondary_p, destroy=False)
    restart_mongo_proc(self.primary_p)
    admin_db = self.primary_conn["admin"]
    assert_soon(lambda: admin_db.command("isMaster")["ismaster"],
                "restarted primary never resumed primary status")
    restart_mongo_proc(self.secondary_p)
    assert_soon(
        lambda: retry_until_ok(rs_secondary.admin.command,
                               'replSetGetStatus')['myState'] == 2,
        "restarted secondary never resumed secondary status")
    assert_soon(
        lambda: retry_until_ok(self.main_conn.test.mc.find().count) > 0,
        "documents not found after primary/secondary restarted")

    # The rollback should leave only the first document in MongoDB...
    self.assertEqual(self.main_conn["test"]["mc"].count(), 1)
    self.assertEqual(self.main_conn["test"]["mc"].find_one()["i"], 0)

    # ...and likewise in the doc manager.
    dm = self.opman.doc_managers[0]
    self.assertEqual(len(dm._search()), 1)
    self.assertEqual(dm._search()[0]["i"], 0)

    # cleanup
    self.opman.join()
def test_single_target(self):
    """Test a rollback with a single replication target.

    Force a rollback: insert a doc, kill the primary, insert another
    doc against the newly-promoted secondary, kill that node too, then
    restart both.  Only the first document should survive the rollback,
    both in MongoDB and in the doc manager.
    """
    self.opman.start()
    # Insert first document with primary up
    self.main_conn["test"]["mc"].insert({"i": 0})
    self.assertEqual(self.primary_conn["test"]["mc"].find().count(), 1)
    # Make sure the insert is replicated
    secondary = self.secondary_conn
    assert_soon(lambda: secondary["test"]["mc"].count() == 1,
                "first write didn't replicate to secondary")
    # Kill the primary
    kill_mongo_proc(self.primary_p, destroy=False)
    # Wait for the secondary to be promoted.  assert_soon bounds the
    # wait, so a failed election fails the test instead of hanging it
    # forever (the previous bare while/sleep loop never timed out).
    assert_soon(lambda: secondary["admin"].command("isMaster")["ismaster"],
                "secondary was never promoted")
    # Insert another document. This will be rolled back later
    retry_until_ok(self.main_conn["test"]["mc"].insert, {"i": 1})
    self.assertEqual(secondary["test"]["mc"].count(), 2)
    # Wait for replication to doc manager
    assert_soon(lambda: len(self.opman.doc_managers[0]._search()) == 2,
                "not all writes were replicated to doc manager")
    # Kill the new primary
    kill_mongo_proc(self.secondary_p, destroy=False)
    # Start both servers back up
    restart_mongo_proc(self.primary_p)
    primary_admin = self.primary_conn["admin"]
    assert_soon(lambda: primary_admin.command("isMaster")["ismaster"],
                "restarted primary never resumed primary status")
    restart_mongo_proc(self.secondary_p)
    assert_soon(
        lambda: retry_until_ok(secondary.admin.command,
                               'replSetGetStatus')['myState'] == 2,
        "restarted secondary never resumed secondary status")
    assert_soon(
        lambda: retry_until_ok(self.main_conn.test.mc.find().count) > 0,
        "documents not found after primary/secondary restarted")
    # Only first document should exist in MongoDB
    self.assertEqual(self.main_conn["test"]["mc"].count(), 1)
    self.assertEqual(self.main_conn["test"]["mc"].find_one()["i"], 0)
    # Same case should hold for the doc manager
    doc_manager = self.opman.doc_managers[0]
    self.assertEqual(len(doc_manager._search()), 1)
    self.assertEqual(doc_manager._search()[0]["i"], 0)
    # cleanup
    self.opman.join()
def test_stressed_rollback(self):
    """Test stressed rollback with number of documents equal to specified
    in global variable. Strategy for rollback is the same as before.
    """
    coll = self.conn['test']['test']
    # Seed the collection with STRESS_COUNT "Paul" documents and wait
    # for all of them to reach the doc manager.
    for idx in range(0, STRESS_COUNT):
        coll.insert({'name': 'Paul ' + str(idx)})

    def all_pauls_synced():
        return sum(1 for _ in self.mongo_doc._search()) == STRESS_COUNT
    assert_soon(all_pauls_synced)

    primary_conn = MongoClient(mongo_host, self.primary_p)
    kill_mongo_proc(self.primary_p, destroy=False)

    # Wait until the secondary has been elected primary.
    new_primary_conn = MongoClient(mongo_host, self.secondary_p)
    assert_soon(
        lambda: new_primary_conn['admin'].command("isMaster")['ismaster'])
    time.sleep(5)

    # Write "Pauline" documents against the new primary; writes that
    # fail while the election settles are skipped (best effort).
    count = -1
    while count + 1 < STRESS_COUNT:
        try:
            count += 1
            coll.insert({'name': 'Pauline ' + str(count)})
        except (OperationFailure, AutoReconnect):
            time.sleep(1)
    assert_soon(lambda: sum(1 for _ in self.mongo_doc._search()) ==
                coll.find().count())

    # Each replicated "Pauline" must match its MongoDB counterpart.
    for item in self.mongo_doc._search():
        if 'Pauline' in item['name']:
            mongo_item = coll.find_one({'name': item['name']})
            self.assertEqual(item['_id'], mongo_item['_id'])

    # Kill the new primary too, then restart both members to trigger
    # the rollback of the "Pauline" writes.
    kill_mongo_proc(self.secondary_p, destroy=False)
    restart_mongo_proc(self.primary_p)
    assert_soon(
        lambda: primary_conn['admin'].command("isMaster")['ismaster'])
    restart_mongo_proc(self.secondary_p)

    def only_pauls_remain():
        return sum(1 for _ in self.mongo_doc._search()) == STRESS_COUNT
    assert_soon(only_pauls_remain)

    surviving = list(self.mongo_doc._search())
    self.assertEqual(len(surviving), STRESS_COUNT)
    for item in surviving:
        self.assertTrue('Paul' in item['name'])
    cursor = retry_until_ok(coll.find)
    self.assertEqual(retry_until_ok(cursor.count), STRESS_COUNT)
def test_stressed_rollback(self):
    """Test stressed rollback with number of documents equal to specified
    in global variable. Strategy for rollback is the same as before.
    """
    # Seed STRESS_COUNT "Paul" documents while the primary is healthy.
    for i in range(0, STRESS_COUNT):
        self.conn['test']['test'].insert({'name': 'Paul ' + str(i)})
    # Wait until every document has been replicated to the doc manager.
    search = self.mongo_doc._search
    condition = lambda: sum(1 for _ in search()) == STRESS_COUNT
    assert_soon(condition)
    primary_conn = MongoClient(mongo_host, self.primary_p)
    # Kill the primary and wait for the secondary to win the election.
    kill_mongo_proc(self.primary_p, destroy=False)
    new_primary_conn = MongoClient(mongo_host, self.secondary_p)
    admin = new_primary_conn['admin']
    assert_soon(lambda: admin.command("isMaster")['ismaster'])
    # Give the new primary a moment to settle before writing to it.
    time.sleep(5)
    # Insert "Pauline" documents against the new primary.  A write that
    # fails during failover is skipped (count advances before the
    # insert), so this is best-effort; the check below compares against
    # the actual collection size rather than STRESS_COUNT.
    count = -1
    while count + 1 < STRESS_COUNT:
        try:
            count += 1
            self.conn['test']['test'].insert(
                {'name': 'Pauline ' + str(count)})
        except (OperationFailure, AutoReconnect):
            time.sleep(1)
    assert_soon(lambda: sum(1 for _ in self.mongo_doc._search()) ==
                self.conn['test']['test'].find().count())
    # Every replicated "Pauline" must agree with MongoDB on its _id.
    result_set_1 = self.mongo_doc._search()
    for item in result_set_1:
        if 'Pauline' in item['name']:
            result_set_2 = self.conn['test']['test'].find_one(
                {'name': item['name']})
            self.assertEqual(item['_id'], result_set_2['_id'])
    # Kill the new primary too, then restart both nodes: the "Pauline"
    # writes were never replicated and should now be rolled back.
    kill_mongo_proc(self.secondary_p, destroy=False)
    restart_mongo_proc(self.primary_p)
    db_admin = primary_conn['admin']
    assert_soon(lambda: db_admin.command("isMaster")['ismaster'])
    restart_mongo_proc(self.secondary_p)
    # After the rollback only the original "Paul" documents remain.
    search = self.mongo_doc._search
    condition = lambda: sum(1 for _ in search()) == STRESS_COUNT
    assert_soon(condition)
    result_set_1 = list(self.mongo_doc._search())
    self.assertEqual(len(result_set_1), STRESS_COUNT)
    for item in result_set_1:
        self.assertTrue('Paul' in item['name'])
    find_cursor = retry_until_ok(self.conn['test']['test'].find)
    self.assertEqual(retry_until_ok(find_cursor.count), STRESS_COUNT)
def test_stressed_rollback(self):
    """Test stressed rollback with a large number of documents.

    Same failover/rollback sequence as test_rollback, scaled to
    STRESS_COUNT documents per phase.
    """
    for i in range(0, STRESS_COUNT):
        self.conn['test']['test'].insert({'name': 'Paul ' + str(i)})
    # Wait for all "Paul" documents to reach Solr.
    assert_soon(lambda: sum(1 for _ in self.solr_conn.search(
        '*:*', rows=STRESS_COUNT)) == STRESS_COUNT)
    primary_conn = MongoClient(mongo_host, self.primary_p)
    kill_mongo_proc(self.primary_p, destroy=False)
    new_primary_conn = MongoClient(mongo_host, self.secondary_p)
    admin_db = new_primary_conn['admin']
    # Bounded wait for the election (was an unbounded while/sleep loop
    # that would hang the suite if the secondary were never promoted).
    assert_soon(lambda: admin_db.command("isMaster")['ismaster'],
                "secondary was never promoted")
    time.sleep(5)
    # Best-effort inserts against the new primary: a write that fails
    # during failover is skipped, since count advances regardless.
    count = -1
    while count + 1 < STRESS_COUNT:
        try:
            count += 1
            self.conn['test']['test'].insert(
                {'name': 'Pauline ' + str(count)})
        except (OperationFailure, AutoReconnect):
            time.sleep(1)
    collection_size = self.conn['test']['test'].find().count()
    assert_soon(lambda: sum(1 for _ in self.solr_conn.search(
        '*:*', rows=STRESS_COUNT * 2)) == collection_size)
    # Solr's copy of each "Pauline" must agree with MongoDB on _id.
    result_set_1 = self.solr_conn.search('Pauline', rows=STRESS_COUNT * 2,
                                         sort='_id asc')
    for item in result_set_1:
        result_set_2 = self.conn['test']['test'].find_one(
            {'name': item['name']})
        self.assertEqual(item['_id'], str(result_set_2['_id']))
    # Kill the new primary too, restart both nodes, and wait for the
    # original primary to resume its role (bounded wait, as above).
    kill_mongo_proc(self.secondary_p, destroy=False)
    restart_mongo_proc(self.primary_p)
    assert_soon(
        lambda: primary_conn['admin'].command("isMaster")['ismaster'],
        "restarted primary never resumed primary status")
    restart_mongo_proc(self.secondary_p)
    # The rolled-back "Pauline" writes must vanish from Solr.
    assert_soon(lambda: sum(1 for _ in self.solr_conn.search(
        'Pauline', rows=STRESS_COUNT * 2)) == 0)
    result_set_1 = list(
        self.solr_conn.search('Pauline', rows=STRESS_COUNT * 2))
    self.assertEqual(len(result_set_1), 0)
    result_set_2 = list(
        self.solr_conn.search('Paul', rows=STRESS_COUNT * 2))
    self.assertEqual(len(result_set_2), STRESS_COUNT)
def test_deletions(self):
    """Test rolling back 'd' operations"""
    self.opman.start()

    # Two inserts while the primary is up; wait for the secondary.
    mc_coll = self.main_conn["test"]["mc"]
    mc_coll.insert({"i": 0})
    mc_coll.insert({"i": 1})
    self.assertEqual(self.primary_conn["test"]["mc"].find().count(), 2)
    assert_soon(lambda: self.secondary_conn["test"]["mc"].count() == 2,
                "first write didn't replicate to secondary")

    # Fail over to the secondary.
    kill_mongo_proc(self.primary_p, destroy=False)
    assert_soon(lambda: self.secondary_conn["admin"].command(
        "isMaster")["ismaster"])

    # Delete one document on the new primary only; this delete is what
    # gets rolled back later.
    retry_until_ok(mc_coll.remove, {"i": 0})
    self.assertEqual(self.secondary_conn["test"]["mc"].count(), 1)
    assert_soon(lambda: len(self.opman.doc_managers[0]._search()) == 1,
                "delete was not replicated to doc manager")

    # Bring down the new primary, then restart both members.
    kill_mongo_proc(self.secondary_p, destroy=False)
    restart_mongo_proc(self.primary_p)
    admin_db = self.primary_conn["admin"]
    assert_soon(lambda: admin_db.command("isMaster")["ismaster"],
                "restarted primary never resumed primary status")
    restart_mongo_proc(self.secondary_p)
    assert_soon(
        lambda: retry_until_ok(self.secondary_conn.admin.command,
                               'replSetGetStatus')['myState'] == 2,
        "restarted secondary never resumed secondary status")

    # The rollback restores the deleted document everywhere.
    assert_soon(
        lambda: retry_until_ok(self.main_conn["test"]["mc"].count) == 2)
    docs = list(self.opman.doc_managers[0]._search())
    self.assertEqual(len(docs), 2,
                     "Expected two documents, but got %r" % docs)
    self.opman.join()
def test_deletions(self):
    """Test rolling back 'd' operations

    The delete happens only on the promoted secondary (the original
    primary is down), so when both nodes restart the delete is rolled
    back: both documents must reappear in MongoDB and in the doc
    manager.
    """
    self.opman.start()
    # Insert a document, wait till it replicates to secondary
    self.main_conn["test"]["mc"].insert({"i": 0})
    self.main_conn["test"]["mc"].insert({"i": 1})
    self.assertEqual(self.primary_conn["test"]["mc"].find().count(), 2)
    assert_soon(lambda: self.secondary_conn["test"]["mc"].count() == 2,
                "first write didn't replicate to secondary")
    # Kill the primary, wait for secondary to be promoted
    kill_mongo_proc(self.primary_p, destroy=False)
    assert_soon(lambda: self.secondary_conn["admin"]
                .command("isMaster")["ismaster"])
    # Delete first document; this reaches only the new primary and is
    # the operation that gets rolled back below.
    retry_until_ok(self.main_conn["test"]["mc"].remove, {"i": 0})
    self.assertEqual(self.secondary_conn["test"]["mc"].count(), 1)
    # Wait for replication to doc manager
    assert_soon(lambda: len(self.opman.doc_managers[0]._search()) == 1,
                "delete was not replicated to doc manager")
    # Kill the new primary
    kill_mongo_proc(self.secondary_p, destroy=False)
    # Start both servers back up
    restart_mongo_proc(self.primary_p)
    primary_admin = self.primary_conn["admin"]
    assert_soon(lambda: primary_admin.command("isMaster")["ismaster"],
                "restarted primary never resumed primary status")
    restart_mongo_proc(self.secondary_p)
    assert_soon(lambda: retry_until_ok(self.secondary_conn.admin.command,
                                       'replSetGetStatus')['myState'] == 2,
                "restarted secondary never resumed secondary status")
    # Both documents should exist in mongo
    assert_soon(lambda: retry_until_ok(
        self.main_conn["test"]["mc"].count) == 2)
    # Both document should exist in doc manager
    doc_manager = self.opman.doc_managers[0]
    docs = list(doc_manager._search())
    self.assertEqual(len(docs), 2,
                     "Expected two documents, but got %r" % docs)
    self.opman.join()
def test_rollback(self):
    """Test behavior during a MongoDB rollback.

    We force a rollback by adding a doc, killing the primary, adding
    another doc, killing the new primary, and then restarting both.
    Only the first document should survive the rollback.
    """
    primary_conn = MongoClient(mongo_host, self.primary_p)
    self.conn['test']['test'].insert({'name': 'paul'})
    # Wait until the write is visible in Mongo and in the target.
    # (Lambdas are passed directly instead of the previous E731-style
    # lambda assignments.)
    assert_soon(lambda: self.conn['test']['test'].find(
        {'name': 'paul'}).count() == 1)
    assert_soon(lambda: self._count() == 1)
    kill_mongo_proc(self.primary_p, destroy=False)
    new_primary_conn = MongoClient(mongo_host, self.secondary_p)
    admin = new_primary_conn['admin']
    assert_soon(lambda: admin.command("isMaster")['ismaster'])
    time.sleep(5)
    # This write reaches only the new primary; it will be rolled back.
    retry_until_ok(self.conn.test.test.insert, {'name': 'pauline'})
    assert_soon(lambda: self._count() == 2)
    result_set_1 = list(self._search())
    result_set_2 = self.conn['test']['test'].find_one({'name': 'pauline'})
    self.assertEqual(len(result_set_1), 2)
    # make sure pauline is there
    for item in result_set_1:
        if item['name'] == 'pauline':
            self.assertEqual(item['_id'], str(result_set_2['_id']))
    kill_mongo_proc(self.secondary_p, destroy=False)
    restart_mongo_proc(self.primary_p)
    # Bounded wait for the restarted primary (the original unbounded
    # while/sleep loop could hang the suite forever).
    assert_soon(
        lambda: primary_conn['admin'].command("isMaster")['ismaster'],
        "restarted primary never resumed primary status")
    restart_mongo_proc(self.secondary_p)
    time.sleep(2)
    # Only 'paul' should survive the rollback.
    result_set_1 = list(self._search())
    self.assertEqual(len(result_set_1), 1)
    for item in result_set_1:
        self.assertEqual(item['name'], 'paul')
    find_cursor = retry_until_ok(self.conn['test']['test'].find)
    self.assertEqual(retry_until_ok(find_cursor.count), 1)
def test_stressed_rollback(self):
    """Test stressed rollback with a large number of documents.

    Same failover/rollback sequence as test_rollback, scaled to
    STRESS_COUNT documents per phase.
    """
    for i in range(0, STRESS_COUNT):
        self.conn["test"]["test"].insert({"name": "Paul " + str(i)})
    # Wait until Solr has every "Paul" document.
    assert_soon(lambda: sum(1 for _ in self.solr_conn.search(
        "*:*", rows=STRESS_COUNT)) == STRESS_COUNT)
    primary_conn = MongoClient(mongo_host, self.primary_p)
    kill_mongo_proc(self.primary_p, destroy=False)
    new_primary_conn = MongoClient(mongo_host, self.secondary_p)
    admin_db = new_primary_conn["admin"]
    # Bounded wait for the election (the original unbounded while/sleep
    # loop would hang the suite on a failed election).
    assert_soon(lambda: admin_db.command("isMaster")["ismaster"],
                "secondary was never promoted")
    time.sleep(5)
    # Best-effort "Pauline" inserts against the new primary; failed
    # writes are skipped because count advances before the insert.
    count = -1
    while count + 1 < STRESS_COUNT:
        try:
            count += 1
            self.conn["test"]["test"].insert(
                {"name": "Pauline " + str(count)})
        except (OperationFailure, AutoReconnect):
            time.sleep(1)
    collection_size = self.conn["test"]["test"].find().count()
    assert_soon(lambda: sum(1 for _ in self.solr_conn.search(
        "*:*", rows=STRESS_COUNT * 2)) == collection_size)
    # Solr's copy of every "Pauline" must agree with MongoDB on _id.
    result_set_1 = self.solr_conn.search(
        "Pauline", rows=STRESS_COUNT * 2, sort="_id asc")
    for item in result_set_1:
        result_set_2 = self.conn["test"]["test"].find_one(
            {"name": item["name"]})
        self.assertEqual(item["_id"], str(result_set_2["_id"]))
    kill_mongo_proc(self.secondary_p, destroy=False)
    restart_mongo_proc(self.primary_p)
    # Bounded wait for the restarted primary, as above.
    assert_soon(
        lambda: primary_conn["admin"].command("isMaster")["ismaster"],
        "restarted primary never resumed primary status")
    restart_mongo_proc(self.secondary_p)
    # The rolled-back "Pauline" documents must disappear from Solr.
    assert_soon(lambda: sum(1 for _ in self.solr_conn.search(
        "Pauline", rows=STRESS_COUNT * 2)) == 0)
    result_set_1 = list(
        self.solr_conn.search("Pauline", rows=STRESS_COUNT * 2))
    self.assertEqual(len(result_set_1), 0)
    result_set_2 = list(
        self.solr_conn.search("Paul", rows=STRESS_COUNT * 2))
    self.assertEqual(len(result_set_2), STRESS_COUNT)
def test_rollback(self):
    """Tests rollback. We force a rollback by inserting one doc, killing
    primary, adding another doc, killing the new primary, and
    restarting both the servers.
    """
    primary_conn = MongoClient(mongo_host, self.primary_p)
    self.conn['test']['test'].insert({'name': 'paul'})
    assert_soon(
        lambda: self.conn.test.test.find({'name': 'paul'}).count() == 1)
    assert_soon(
        lambda: sum(1 for _ in self.solr_conn.search('*:*')) == 1)
    kill_mongo_proc(self.primary_p, destroy=False)
    new_primary_conn = MongoClient(mongo_host, self.secondary_p)
    admin_db = new_primary_conn['admin']
    # Bounded wait for the secondary's promotion; the original
    # while/sleep loop had no timeout and could hang forever.
    assert_soon(lambda: admin_db.command("isMaster")['ismaster'],
                "secondary was never promoted")
    time.sleep(5)
    retry_until_ok(self.conn.test.test.insert, {'name': 'pauline'})
    assert_soon(
        lambda: sum(1 for _ in self.solr_conn.search('*:*')) == 2)
    # The new doc must be in Solr with an _id matching MongoDB's.
    result_set_1 = list(self.solr_conn.search('pauline'))
    result_set_2 = self.conn['test']['test'].find_one({'name': 'pauline'})
    self.assertEqual(len(result_set_1), 1)
    for item in result_set_1:
        self.assertEqual(item['_id'], str(result_set_2['_id']))
    kill_mongo_proc(self.secondary_p, destroy=False)
    restart_mongo_proc(self.primary_p)
    # Bounded wait for the restarted primary, as above.
    assert_soon(
        lambda: primary_conn['admin'].command("isMaster")['ismaster'],
        "restarted primary never resumed primary status")
    restart_mongo_proc(self.secondary_p)
    time.sleep(2)
    # After the rollback only 'paul' should remain in Solr.
    result_set_1 = self.solr_conn.search('pauline')
    self.assertEqual(sum(1 for _ in result_set_1), 0)
    result_set_2 = self.solr_conn.search('paul')
    self.assertEqual(sum(1 for _ in result_set_2), 1)
def test_rollback(self):
    """Tests rollback. We force a rollback by inserting one doc, killing
    primary, adding another doc, killing the new primary, and
    restarting both the servers.
    """
    primary_conn = MongoClient(mongo_host, self.primary_p)
    self.conn['test']['test'].insert({'name': 'paul'})
    # Bounded waits throughout: the original bare while/sleep polling
    # loops had no timeout and could hang the suite indefinitely.
    assert_soon(lambda: self.conn['test']['test'].find(
        {'name': 'paul'}).count() == 1)
    assert_soon(lambda: len(self.solr_conn.search('*:*')) == 1)
    kill_mongo_proc(self.primary_p, destroy=False)
    new_primary_conn = MongoClient(mongo_host, self.secondary_p)
    admin_db = new_primary_conn['admin']
    assert_soon(lambda: admin_db.command("isMaster")['ismaster'],
                "secondary was never promoted")
    time.sleep(5)
    retry_until_ok(self.conn.test.test.insert, {'name': 'pauline'})
    assert_soon(lambda: len(self.solr_conn.search('*:*')) == 2)
    result_set_1 = self.solr_conn.search('pauline')
    result_set_2 = self.conn['test']['test'].find_one({'name': 'pauline'})
    self.assertEqual(len(result_set_1), 1)
    # Solr stores _id as a string; compare against str(ObjectId).
    for item in result_set_1:
        self.assertEqual(item['_id'], str(result_set_2['_id']))
    kill_mongo_proc(self.secondary_p, destroy=False)
    restart_mongo_proc(self.primary_p)
    assert_soon(
        lambda: primary_conn['admin'].command("isMaster")['ismaster'],
        "restarted primary never resumed primary status")
    restart_mongo_proc(self.secondary_p)
    time.sleep(2)
    # Only 'paul' survives the rollback.
    result_set_1 = self.solr_conn.search('pauline')
    self.assertEqual(len(result_set_1), 0)
    result_set_2 = self.solr_conn.search('paul')
    self.assertEqual(len(result_set_2), 1)
def test_rollback(self):
    """Test the rollback method in a sharded environment

    Cases:
    1. Documents on both shards, rollback on one shard
    2. Documents on both shards, rollback on both shards
    """
    self.opman1.start()
    self.opman2.start()

    # Insert first documents while primaries are up.  {"i": 0} routes
    # to shard1 and {"i": 1000} to shard2 (one doc lands on each).
    db_main = self.mongos_conn["test"]["mcsharded"]
    db_main.insert({"i": 0}, w=2)
    db_main.insert({"i": 1000}, w=2)
    self.assertEqual(self.shard1_conn["test"]["mcsharded"].count(), 1)
    self.assertEqual(self.shard2_conn["test"]["mcsharded"].count(), 1)

    # Case 1: only one primary goes down, shard1 in this case
    kill_mongo_proc(self.shard1_prim_p, destroy=False)

    # Wait for the secondary to be promoted
    shard1_secondary_admin = self.shard1_secondary_conn["admin"]
    assert_soon(
        lambda: shard1_secondary_admin.command("isMaster")["ismaster"])

    # Insert another document. This will be rolled back later
    retry_until_ok(db_main.insert, {"i": 1})
    db_secondary1 = self.shard1_secondary_conn["test"]["mcsharded"]
    db_secondary2 = self.shard2_secondary_conn["test"]["mcsharded"]
    self.assertEqual(db_secondary1.count(), 2)

    # Wait for replication on the doc manager
    # Note that both OplogThreads share the same doc manager
    c = lambda: len(self.opman1.doc_managers[0]._search()) == 3
    assert_soon(c, "not all writes were replicated to doc manager",
                max_tries=120)

    # Kill the new primary
    kill_mongo_proc(self.shard1_scnd_p, destroy=False)

    # Start both servers back up
    restart_mongo_proc(self.shard1_prim_p)
    primary_admin = self.shard1_conn["admin"]
    c = lambda: primary_admin.command("isMaster")["ismaster"]
    # retry_until_ok shields the isMaster call from transient
    # connection errors while the node is still starting.
    assert_soon(lambda: retry_until_ok(c))
    restart_mongo_proc(self.shard1_scnd_p)
    secondary_admin = self.shard1_secondary_conn["admin"]
    c = lambda: secondary_admin.command("replSetGetStatus")["myState"] == 2
    assert_soon(c)
    query = {"i": {"$lt": 1000}}
    assert_soon(lambda: retry_until_ok(db_main.find(query).count) > 0)

    # Only first document should exist in MongoDB
    self.assertEqual(db_main.find(query).count(), 1)
    self.assertEqual(db_main.find_one(query)["i"], 0)

    # Same should hold for the doc manager
    docman_docs = [d for d in self.opman1.doc_managers[0]._search()
                   if d["i"] < 1000]
    self.assertEqual(len(docman_docs), 1)
    self.assertEqual(docman_docs[0]["i"], 0)

    # Wait for previous rollback to complete before starting case 2,
    # so both shards are back to exactly one document apiece.
    def rollback_done():
        secondary1_count = retry_until_ok(db_secondary1.count)
        secondary2_count = retry_until_ok(db_secondary2.count)
        return (1, 1) == (secondary1_count, secondary2_count)
    assert_soon(rollback_done,
                "rollback never replicated to one or more secondaries")

    ##############################

    # Case 2: Primaries on both shards go down
    kill_mongo_proc(self.shard1_prim_p, destroy=False)
    kill_mongo_proc(self.shard2_prim_p, destroy=False)

    # Wait for the secondaries to be promoted
    shard1_secondary_admin = self.shard1_secondary_conn["admin"]
    shard2_secondary_admin = self.shard2_secondary_conn["admin"]
    assert_soon(
        lambda: shard1_secondary_admin.command("isMaster")["ismaster"])
    assert_soon(
        lambda: shard2_secondary_admin.command("isMaster")["ismaster"])

    # Insert another document on each shard. These will be rolled back later
    retry_until_ok(db_main.insert, {"i": 1})
    self.assertEqual(db_secondary1.count(), 2)
    retry_until_ok(db_main.insert, {"i": 1001})
    self.assertEqual(db_secondary2.count(), 2)

    # Wait for replication on the doc manager
    c = lambda: len(self.opman1.doc_managers[0]._search()) == 4
    assert_soon(c, "not all writes were replicated to doc manager")

    # Kill the new primaries
    kill_mongo_proc(self.shard1_scnd_p, destroy=False)
    kill_mongo_proc(self.shard2_scnd_p, destroy=False)

    # Start the servers back up...
    # Shard 1
    restart_mongo_proc(self.shard1_prim_p)
    c = lambda: self.shard1_conn['admin'].command("isMaster")["ismaster"]
    assert_soon(lambda: retry_until_ok(c))
    restart_mongo_proc(self.shard1_scnd_p)
    secondary_admin = self.shard1_secondary_conn["admin"]
    c = lambda: secondary_admin.command("replSetGetStatus")["myState"] == 2
    assert_soon(c)
    # Shard 2
    restart_mongo_proc(self.shard2_prim_p)
    c = lambda: self.shard2_conn['admin'].command("isMaster")["ismaster"]
    assert_soon(lambda: retry_until_ok(c))
    restart_mongo_proc(self.shard2_scnd_p)
    secondary_admin = self.shard2_secondary_conn["admin"]
    c = lambda: secondary_admin.command("replSetGetStatus")["myState"] == 2
    assert_soon(c)

    # Wait for the shards to come online
    assert_soon(lambda: retry_until_ok(db_main.find(query).count) > 0)
    query2 = {"i": {"$gte": 1000}}
    assert_soon(lambda: retry_until_ok(db_main.find(query2).count) > 0)

    # Only first documents should exist in MongoDB
    self.assertEqual(db_main.find(query).count(), 1)
    self.assertEqual(db_main.find_one(query)["i"], 0)
    self.assertEqual(db_main.find(query2).count(), 1)
    self.assertEqual(db_main.find_one(query2)["i"], 1000)

    # Same should hold for the doc manager
    i_values = [d["i"] for d in self.opman1.doc_managers[0]._search()]
    self.assertEqual(len(i_values), 2)
    self.assertIn(0, i_values)
    self.assertIn(1000, i_values)
def test_many_targets(self):
    """Test a rollback with several replication targets.

    Three doc managers are attached to the OplogThread; some are
    deliberately desynchronized before the rollback to emulate target
    systems that are ahead of / behind one another.
    """
    # OplogThread has multiple doc managers
    doc_managers = [DocManager(), DocManager(), DocManager()]
    self.opman.doc_managers = doc_managers
    self.opman.start()
    # Insert a document into each namespace
    self.main_conn["test"]["mc"].insert({"i": 0})
    self.assertEqual(self.primary_conn["test"]["mc"].count(), 1)
    # Make sure the insert is replicated
    secondary = self.secondary_conn
    assert_soon(lambda: secondary["test"]["mc"].count() == 1,
                "first write didn't replicate to secondary")
    # Kill the primary
    kill_mongo_proc(self.primary_p, destroy=False)
    # Wait for the secondary to be promoted.  assert_soon keeps each
    # wait bounded; the previous bare while/sleep loops could hang the
    # suite forever on a failed election or restart.
    assert_soon(lambda: secondary["admin"].command("isMaster")["ismaster"],
                "secondary was never promoted")
    # Insert more documents. This will be rolled back later
    # Some of these documents will be manually removed from
    # certain doc managers, to emulate the effect of certain
    # target systems being ahead/behind others
    secondary_ids = []
    for i in range(1, 10):
        secondary_ids.append(
            retry_until_ok(self.main_conn["test"]["mc"].insert, {"i": i}))
    self.assertEqual(self.secondary_conn["test"]["mc"].count(), 10)
    # Wait for replication to the doc managers
    def docmans_done():
        for dm in self.opman.doc_managers:
            if len(dm._search()) != 10:
                return False
        return True
    assert_soon(docmans_done,
                "not all writes were replicated to doc managers")
    # Remove some documents from the doc managers to simulate
    # uneven replication ("doc_id" avoids shadowing the builtin "id")
    for doc_id in secondary_ids[8:]:
        self.opman.doc_managers[1].remove({"_id": doc_id})
    for doc_id in secondary_ids[2:]:
        self.opman.doc_managers[2].remove({"_id": doc_id})
    # Kill the new primary
    kill_mongo_proc(self.secondary_p, destroy=False)
    # Start both servers back up
    restart_mongo_proc(self.primary_p)
    primary_admin = self.primary_conn["admin"]
    assert_soon(lambda: primary_admin.command("isMaster")["ismaster"],
                "restarted primary never resumed primary status")
    restart_mongo_proc(self.secondary_p)
    assert_soon(
        lambda: retry_until_ok(secondary["admin"].command,
                               "replSetGetStatus")["myState"] == 2,
        "restarted secondary never resumed secondary status")
    assert_soon(
        lambda: retry_until_ok(
            self.primary_conn["test"]["mc"].find().count) > 0,
        "documents not found after primary/secondary restarted")
    # Only first document should exist in MongoDB
    self.assertEqual(self.primary_conn["test"]["mc"].count(), 1)
    self.assertEqual(self.primary_conn["test"]["mc"].find_one()["i"], 0)
    # Give OplogThread some time to catch up
    time.sleep(10)
    # Same case should hold for the doc managers
    for dm in self.opman.doc_managers:
        self.assertEqual(len(dm._search()), 1)
        self.assertEqual(dm._search()[0]["i"], 0)
    self.opman.join()
def test_many_targets(self):
    """Test a rollback with several replication targets.

    Three doc managers are attached to the OplogThread; some are
    deliberately desynchronized before the rollback to emulate target
    systems that are ahead of / behind one another.
    """
    # OplogThread has multiple doc managers
    doc_managers = [DocManager(), DocManager(), DocManager()]
    self.opman.doc_managers = doc_managers
    self.opman.start()
    # Insert a document into each namespace
    self.main_conn["test"]["mc"].insert({"i": 0})
    self.assertEqual(self.primary_conn["test"]["mc"].count(), 1)
    # Make sure the insert is replicated
    secondary = self.secondary_conn
    assert_soon(lambda: secondary["test"]["mc"].count() == 1,
                "first write didn't replicate to secondary")
    # Kill the primary
    kill_mongo_proc(self.primary_p, destroy=False)
    # Wait for the secondary to be promoted
    assert_soon(lambda: secondary.admin.command("isMaster")['ismaster'],
                'secondary was never promoted')
    # Insert more documents. This will be rolled back later
    # Some of these documents will be manually removed from
    # certain doc managers, to emulate the effect of certain
    # target systems being ahead/behind others
    secondary_ids = []
    for i in range(1, 10):
        secondary_ids.append(
            retry_until_ok(self.main_conn["test"]["mc"].insert, {"i": i}))
    self.assertEqual(self.secondary_conn["test"]["mc"].count(), 10)
    # Wait for replication to the doc managers
    def docmans_done():
        for dm in self.opman.doc_managers:
            if len(dm._search()) != 10:
                return False
        return True
    assert_soon(docmans_done,
                "not all writes were replicated to doc managers")
    # Remove some documents from the doc managers to simulate
    # uneven replication ("doc_id" avoids shadowing the builtin "id")
    ts = self.opman.doc_managers[0].get_last_doc()['_ts']
    for doc_id in secondary_ids[8:]:
        self.opman.doc_managers[1].remove({
            "_id": doc_id,
            "ns": "test.mc",
            "_ts": ts
        })
    for doc_id in secondary_ids[2:]:
        self.opman.doc_managers[2].remove({
            "_id": doc_id,
            "ns": "test.mc",
            "_ts": ts
        })
    # Kill the new primary
    kill_mongo_proc(self.secondary_p, destroy=False)
    # Start both servers back up
    restart_mongo_proc(self.primary_p)
    primary_admin = self.primary_conn["admin"]
    # NOTE: this message string was split across lines in the original
    # source (an unterminated string literal); rejoined here.
    assert_soon(lambda: primary_admin.command("isMaster")['ismaster'],
                'restarted primary never resumed primary status')
    restart_mongo_proc(self.secondary_p)
    assert_soon(
        lambda: retry_until_ok(secondary.admin.command,
                               'replSetGetStatus')['myState'] == 2,
        "restarted secondary never resumed secondary status")
    assert_soon(
        lambda: retry_until_ok(self.primary_conn.test.mc.find().count) > 0,
        "documents not found after primary/secondary restarted")
    # Only first document should exist in MongoDB
    self.assertEqual(self.primary_conn["test"]["mc"].count(), 1)
    self.assertEqual(self.primary_conn["test"]["mc"].find_one()["i"], 0)
    # Give OplogThread some time to catch up
    time.sleep(10)
    # Same case should hold for the doc managers
    for dm in self.opman.doc_managers:
        self.assertEqual(len(dm._search()), 1)
        self.assertEqual(dm._search()[0]["i"], 0)
    self.opman.join()
def test_stressed_rollback(self):
    """Test stressed rollback with a large number of documents.

    Same failover/rollback sequence as test_rollback, scaled to
    STRESS_COUNT documents per phase.
    """
    for i in range(0, STRESS_COUNT):
        self.conn['test']['test'].insert(
            {'name': 'Paul ' + str(i)})
    # Bounded waits throughout: the original bare while/sleep polling
    # loops had no timeout and could hang the suite indefinitely.
    assert_soon(lambda: len(self.solr_conn.search(
        '*:*', rows=STRESS_COUNT)) == STRESS_COUNT)
    primary_conn = MongoClient(mongo_host, self.primary_p)
    kill_mongo_proc(self.primary_p, destroy=False)
    new_primary_conn = MongoClient(mongo_host, self.secondary_p)
    admin_db = new_primary_conn['admin']
    assert_soon(lambda: admin_db.command("isMaster")['ismaster'],
                "secondary was never promoted")
    time.sleep(5)
    # Best-effort "Pauline" inserts against the new primary; a write
    # that fails during failover is skipped (count advances first).
    count = -1
    while count + 1 < STRESS_COUNT:
        try:
            count += 1
            self.conn['test']['test'].insert(
                {'name': 'Pauline ' + str(count)})
        except (OperationFailure, AutoReconnect):
            time.sleep(1)
    assert_soon(lambda: len(self.solr_conn.search(
        '*:*', rows=STRESS_COUNT * 2)) ==
        self.conn['test']['test'].find().count())
    # Solr's copy of each "Pauline" must agree with MongoDB on _id.
    result_set_1 = self.solr_conn.search(
        'Pauline', rows=STRESS_COUNT * 2, sort='_id asc'
    )
    for item in result_set_1:
        result_set_2 = self.conn['test']['test'].find_one(
            {'name': item['name']})
        self.assertEqual(item['_id'], str(result_set_2['_id']))
    kill_mongo_proc(self.secondary_p, destroy=False)
    restart_mongo_proc(self.primary_p)
    assert_soon(
        lambda: primary_conn['admin'].command("isMaster")['ismaster'],
        "restarted primary never resumed primary status")
    restart_mongo_proc(self.secondary_p)
    # The rolled-back "Pauline" documents must disappear from Solr.
    assert_soon(lambda: len(self.solr_conn.search(
        'Pauline', rows=STRESS_COUNT * 2)) == 0)
    result_set_1 = self.solr_conn.search(
        'Pauline', rows=STRESS_COUNT * 2
    )
    self.assertEqual(len(result_set_1), 0)
    result_set_2 = self.solr_conn.search(
        'Paul', rows=STRESS_COUNT * 2
    )
    self.assertEqual(len(result_set_2), STRESS_COUNT)