def test_rollback(self):
    """Tests rollback. We force a rollback by adding a doc, killing the
    primary, adding another doc, killing the new primary, and then
    restarting both.
    """
    primary_conn = Connection(HOSTNAME, int(PORTS_ONE['PRIMARY']))
    self.conn['test']['test'].insert({'name': 'paul'}, safe=True)
    # Wait until the insert is visible in MongoDB and in the doc manager.
    while self.conn['test']['test'].find({'name': 'paul'}).count() != 1:
        time.sleep(1)
    while len(self.mongo_doc._search()) != 1:
        time.sleep(1)

    kill_mongo_proc(HOSTNAME, PORTS_ONE['PRIMARY'])

    new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))
    admin = new_primary_conn['admin']
    # Wait for the secondary to be promoted to primary.
    while admin.command("isMaster")['ismaster'] is False:
        time.sleep(1)
    time.sleep(5)

    # Insert a second doc, retrying while the election settles.
    count = 0
    while True:
        try:
            self.conn['test']['test'].insert(
                {'name': 'pauline'}, safe=True)
            break
        except OperationFailure:
            time.sleep(1)
            count += 1
            if count >= 60:
                # FIX: fail the test instead of killing the whole test
                # process with sys.exit(1), matching sibling tests.
                self.fail('Call to insert failed too '
                          'many times in test_rollback')
            continue

    while len(self.mongo_doc._search()) != 2:
        time.sleep(1)

    result_set_1 = self.mongo_doc._search()
    result_set_2 = self.conn['test']['test'].find_one({'name': 'pauline'})
    # FIX: the original asserted len(result_set_2) == 2, but find_one()
    # returns a single document (a dict), so that counted its keys.
    # The intent is that the doc manager now holds two documents.
    self.assertEqual(len(result_set_1), 2)
    # Make sure 'pauline' is there and carries the same _id as in Mongo.
    for item in result_set_1:
        if item['name'] == 'pauline':
            self.assertEqual(item['_id'], result_set_2['_id'])

    kill_mongo_proc(HOSTNAME, PORTS_ONE['SECONDARY'])

    # Restart both members; the old primary comes back first, which
    # forces the un-replicated 'pauline' write to be rolled back.
    start_mongo_proc(PORTS_ONE['PRIMARY'], "demo-repl", "/replset1a",
                     "/replset1a.log", None)
    while primary_conn['admin'].command("isMaster")['ismaster'] is False:
        time.sleep(1)
    start_mongo_proc(PORTS_ONE['SECONDARY'], "demo-repl", "/replset1b",
                     "/replset1b.log", None)
    time.sleep(2)

    # After the rollback only the first document should survive.
    result_set_1 = self.mongo_doc._search()
    self.assertEqual(len(result_set_1), 1)
    for item in result_set_1:
        self.assertEqual(item['name'], 'paul')
    find_cursor = retry_until_ok(self.conn['test']['test'].find)
    self.assertEqual(retry_until_ok(find_cursor.count), 1)
def test_rollback(self):
    """Tests rollback. We force a rollback by adding a doc, killing the
    primary, adding another doc, killing the new primary, and then
    restarting both.
    """
    primary_conn = Connection(HOSTNAME, int(PORTS_ONE['PRIMARY']))
    self.conn['test']['test'].insert({'name': 'paul'}, safe=True)
    # Wait until the insert is visible in MongoDB and in the doc manager.
    while self.conn['test']['test'].find({'name': 'paul'}).count() != 1:
        time.sleep(1)
    while len(self.mongo_doc._search()) != 1:
        time.sleep(1)

    kill_mongo_proc(HOSTNAME, PORTS_ONE['PRIMARY'])

    new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))
    admin = new_primary_conn['admin']
    # Wait for the secondary to be promoted to primary.
    while admin.command("isMaster")['ismaster'] is False:
        time.sleep(1)
    time.sleep(5)

    # Insert a second doc, retrying while the election settles.
    count = 0
    while True:
        try:
            self.conn['test']['test'].insert(
                {'name': 'pauline'}, safe=True)
            break
        except OperationFailure:
            time.sleep(1)
            count += 1
            if count >= 60:
                # FIX: fail the test instead of killing the whole test
                # process with sys.exit(1), matching sibling tests.
                self.fail('Call to insert failed too '
                          'many times in test_rollback')
            continue

    while len(self.mongo_doc._search()) != 2:
        time.sleep(1)

    result_set_1 = self.mongo_doc._search()
    result_set_2 = self.conn['test']['test'].find_one({'name': 'pauline'})
    # FIX: the original asserted len(result_set_2) == 2, but find_one()
    # returns a single document (a dict), so that counted its keys.
    # The intent is that the doc manager now holds two documents.
    self.assertEqual(len(result_set_1), 2)
    # Make sure 'pauline' is there and carries the same _id as in Mongo.
    for item in result_set_1:
        if item['name'] == 'pauline':
            self.assertEqual(item['_id'], result_set_2['_id'])

    kill_mongo_proc(HOSTNAME, PORTS_ONE['SECONDARY'])

    # Restart both members; the old primary comes back first, which
    # forces the un-replicated 'pauline' write to be rolled back.
    start_mongo_proc(PORTS_ONE['PRIMARY'], "demo-repl", "/replset1a",
                     "/replset1a.log", None)
    while primary_conn['admin'].command("isMaster")['ismaster'] is False:
        time.sleep(1)
    start_mongo_proc(PORTS_ONE['SECONDARY'], "demo-repl", "/replset1b",
                     "/replset1b.log", None)
    time.sleep(2)

    # After the rollback only the first document should survive.
    result_set_1 = self.mongo_doc._search()
    self.assertEqual(len(result_set_1), 1)
    for item in result_set_1:
        self.assertEqual(item['name'], 'paul')
    find_cursor = retry_until_ok(self.conn['test']['test'].find)
    self.assertEqual(retry_until_ok(find_cursor.count), 1)
def test_stressed_rollback(self):
    """Test stressed rollback with number of documents equal to specified
    in global variable. Strategy for rollback is the same as before.
    """
    # Wait for the doc manager to be empty, then insert the full batch.
    while len(self.mongo_doc._search()) != 0:
        time.sleep(1)
    for i in range(0, NUMBER_OF_DOC_DIRS):
        self.conn['test']['test'].insert({'name': 'Paul ' + str(i)},
                                         safe=True)
    while len(self.mongo_doc._search()) != NUMBER_OF_DOC_DIRS:
        time.sleep(1)
    primary_conn = Connection(HOSTNAME, int(PORTS_ONE['PRIMARY']))
    kill_mongo_proc(HOSTNAME, PORTS_ONE['PRIMARY'])

    new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))
    admin = new_primary_conn['admin']
    # Wait for the secondary to be promoted to primary.
    while admin.command("isMaster")['ismaster'] is False:
        time.sleep(1)
    time.sleep(5)
    # Insert a second batch, retrying while the election settles.
    # NOTE: count is bumped before the insert, so an index whose insert
    # raised is skipped; the sync-wait below compares against the actual
    # find().count() rather than NUMBER_OF_DOC_DIRS, which tolerates that.
    count = -1
    while count + 1 < NUMBER_OF_DOC_DIRS:
        try:
            count += 1
            self.conn['test']['test'].insert(
                {'name': 'Pauline ' + str(count)}, safe=True)
        except (OperationFailure, AutoReconnect):
            time.sleep(1)
    while (len(self.mongo_doc._search()) !=
           self.conn['test']['test'].find().count()):
        time.sleep(1)
    result_set_1 = self.mongo_doc._search()
    # Every 'Pauline' doc in the doc manager must match its Mongo copy.
    for item in result_set_1:
        if 'Pauline' in item['name']:
            result_set_2 = self.conn['test']['test'].find_one(
                {'name': item['name']})
            self.assertEqual(item['_id'], result_set_2['_id'])

    kill_mongo_proc(HOSTNAME, PORTS_ONE['SECONDARY'])

    # Restart both members; the old primary comes back first, rolling
    # back the 'Pauline' batch.
    start_mongo_proc(PORTS_ONE['PRIMARY'], "demo-repl", "/replset1a",
                     "/replset1a.log", None)
    while primary_conn['admin'].command("isMaster")['ismaster'] is False:
        time.sleep(1)
    time.sleep(1)
    start_mongo_proc(PORTS_ONE['SECONDARY'], "demo-repl", "/replset1b",
                     "/replset1b.log", None)

    # After rollback only the original 'Paul' docs should remain.
    while (len(self.mongo_doc._search()) != NUMBER_OF_DOC_DIRS):
        time.sleep(5)
    result_set_1 = self.mongo_doc._search()
    self.assertEqual(len(result_set_1), NUMBER_OF_DOC_DIRS)
    for item in result_set_1:
        self.assertTrue('Paul' in item['name'])
    find_cursor = retry_until_ok(self.conn['test']['test'].find)
    self.assertEqual(retry_until_ok(find_cursor.count),
                     NUMBER_OF_DOC_DIRS)
def test_stressed_rollback(self):
    """Test stressed rollback with number of documents equal to specified
    in global variable. Rollback is performed like before, but with more
    documents.
    """
    # Wait for the synchronizer to be empty, then insert the full batch.
    while len(self.synchronizer._search()) != 0:
        time.sleep(1)
    for i in range(0, NUMBER_OF_DOC_DIRS):
        self.conn['test']['test'].insert(
            {'name': 'Paul ' + str(i)}, safe=True)
    while len(self.synchronizer._search()) != NUMBER_OF_DOC_DIRS:
        time.sleep(1)
    primary_conn = Connection(HOSTNAME, int(PORTS_ONE['PRIMARY']))
    kill_mongo_proc(HOSTNAME, PORTS_ONE['PRIMARY'])

    new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))
    admin_db = new_primary_conn['admin']
    # Wait for the secondary to be promoted to primary.
    while admin_db.command("isMaster")['ismaster'] is False:
        time.sleep(1)
    time.sleep(5)
    # Insert a second batch, retrying while the election settles.
    count = -1
    while count + 1 < NUMBER_OF_DOC_DIRS:
        try:
            count += 1
            self.conn['test']['test'].insert(
                {'name': 'Pauline ' + str(count)}, safe=True)
        except (OperationFailure, AutoReconnect):
            time.sleep(1)
    while (len(self.synchronizer._search()) !=
           self.conn['test']['test'].find().count()):
        time.sleep(1)
    result_set_1 = self.synchronizer._search()
    # FIX: removed the dead local 'i = 0' that preceded this loop in the
    # original; it was assigned and never read.
    # Every 'Pauline' doc in the synchronizer must match its Mongo copy.
    for item in result_set_1:
        if 'Pauline' in item['name']:
            result_set_2 = self.conn['test']['test'].find_one(
                {'name': item['name']})
            self.assertEqual(item['_id'], result_set_2['_id'])

    kill_mongo_proc(HOSTNAME, PORTS_ONE['SECONDARY'])

    # Restart both members; the old primary comes back first, rolling
    # back the 'Pauline' batch.
    start_mongo_proc(PORTS_ONE['PRIMARY'], "demo-repl", "/replset1a",
                     "/replset1a.log", None)
    while primary_conn['admin'].command("isMaster")['ismaster'] is False:
        time.sleep(1)
    start_mongo_proc(PORTS_ONE['SECONDARY'], "demo-repl", "/replset1b",
                     "/replset1b.log", None)

    # After rollback only the original 'Paul' docs should remain.
    while (len(self.synchronizer._search()) != NUMBER_OF_DOC_DIRS):
        time.sleep(5)
    result_set_1 = self.synchronizer._search()
    self.assertEqual(len(result_set_1), NUMBER_OF_DOC_DIRS)
    for item in result_set_1:
        self.assertTrue('Paul' in item['name'])
    find_cursor = retry_until_ok(self.conn['test']['test'].find)
    self.assertEqual(retry_until_ok(find_cursor.count),
                     NUMBER_OF_DOC_DIRS)
def test_rollback(self):
    """Tests rollback. We force a rollback by inserting one doc,
    killing primary, adding another doc, killing the new primary,
    and restarting both the servers.
    """
    primary_conn = Connection(HOSTNAME, int(PORTS_ONE['PRIMARY']))
    self.conn['test']['test'].insert({'name': 'paul'}, safe=True)
    # Wait until the doc is visible in MongoDB and indexed in Solr.
    while self.conn['test']['test'].find({'name': 'paul'}).count() != 1:
        time.sleep(1)
    while len(self.solr_conn.search('*:*')) != 1:
        time.sleep(1)
    kill_mongo_proc(HOSTNAME, PORTS_ONE['PRIMARY'])

    new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))
    admin_db = new_primary_conn['admin']
    # Wait for the secondary to be promoted to primary.
    while admin_db.command("isMaster")['ismaster'] is False:
        time.sleep(1)
    time.sleep(5)
    # Insert a second doc, retrying while the election settles.
    count = 0
    while True:
        try:
            self.conn['test']['test'].insert(
                {'name': 'pauline'}, safe=True)
            break
        except OperationFailure:
            count += 1
            if count > 60:
                self.fail('Call to insert failed too '
                          'many times in test_rollback')
            time.sleep(1)
            continue
    while (len(self.solr_conn.search('*:*')) != 2):
        time.sleep(1)
    result_set_1 = self.solr_conn.search('pauline')
    result_set_2 = self.conn['test']['test'].find_one({'name': 'pauline'})
    self.assertEqual(len(result_set_1), 1)
    # Solr holds _id as a string, so compare against str(ObjectId).
    for item in result_set_1:
        self.assertEqual(item['_id'], str(result_set_2['_id']))
    kill_mongo_proc(HOSTNAME, PORTS_ONE['SECONDARY'])

    # Restart both members; the old primary comes back first, which
    # rolls back the un-replicated 'pauline' write.
    start_mongo_proc(PORTS_ONE['PRIMARY'], "demo-repl", "/replset1a",
                     "/replset1a.log", None)
    while primary_conn['admin'].command("isMaster")['ismaster'] is False:
        time.sleep(1)
    start_mongo_proc(PORTS_ONE['SECONDARY'], "demo-repl", "/replset1b",
                     "/replset1b.log", None)
    time.sleep(2)
    # After rollback 'pauline' must be gone from Solr; 'paul' remains.
    result_set_1 = self.solr_conn.search('pauline')
    self.assertEqual(len(result_set_1), 0)
    result_set_2 = self.solr_conn.search('paul')
    self.assertEqual(len(result_set_2), 1)
def test_rollback(self):
    """Tests rollback. We force a rollback by inserting one doc,
    killing primary, adding another doc, killing the new primary,
    and restarting both the servers.
    """
    primary_conn = Connection(HOSTNAME, int(PORTS_ONE['PRIMARY']))
    self.conn['test']['test'].insert({'name': 'paul'}, safe=True)
    # Wait until the doc is visible in MongoDB and indexed in Solr.
    while self.conn['test']['test'].find({'name': 'paul'}).count() != 1:
        time.sleep(1)
    while len(self.solr_conn.search('*:*')) != 1:
        time.sleep(1)
    kill_mongo_proc(HOSTNAME, PORTS_ONE['PRIMARY'])

    new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))
    admin_db = new_primary_conn['admin']
    # Wait for the secondary to be promoted to primary.
    while admin_db.command("isMaster")['ismaster'] is False:
        time.sleep(1)
    time.sleep(5)
    # Insert a second doc, retrying while the election settles.
    count = 0
    while True:
        try:
            self.conn['test']['test'].insert({'name': 'pauline'},
                                             safe=True)
            break
        except OperationFailure:
            count += 1
            if count > 60:
                self.fail('Call to insert failed too '
                          'many times in test_rollback')
            time.sleep(1)
            continue
    while (len(self.solr_conn.search('*:*')) != 2):
        time.sleep(1)
    result_set_1 = self.solr_conn.search('pauline')
    result_set_2 = self.conn['test']['test'].find_one({'name': 'pauline'})
    self.assertEqual(len(result_set_1), 1)
    # Solr holds _id as a string, so compare against str(ObjectId).
    for item in result_set_1:
        self.assertEqual(item['_id'], str(result_set_2['_id']))
    kill_mongo_proc(HOSTNAME, PORTS_ONE['SECONDARY'])

    # Restart both members; the old primary comes back first, which
    # rolls back the un-replicated 'pauline' write.
    start_mongo_proc(PORTS_ONE['PRIMARY'], "demo-repl", "/replset1a",
                     "/replset1a.log", None)
    while primary_conn['admin'].command("isMaster")['ismaster'] is False:
        time.sleep(1)
    start_mongo_proc(PORTS_ONE['SECONDARY'], "demo-repl", "/replset1b",
                     "/replset1b.log", None)
    time.sleep(2)
    # After rollback 'pauline' must be gone from Solr; 'paul' remains.
    result_set_1 = self.solr_conn.search('pauline')
    self.assertEqual(len(result_set_1), 0)
    result_set_2 = self.solr_conn.search('paul')
    self.assertEqual(len(result_set_2), 1)
def test_stressed_rollback(self):
    """Test stressed rollback with number of documents equal to specified
    in global variable. Strategy for rollback is the same as before.
    """
    for i in range(0, NUMBER_OF_DOC_DIRS):
        self.conn['test']['test'].insert({'name': 'Paul ' + str(i)},
                                         safe=True)
    # Wait until the whole batch is visible in the doc manager.
    search = self.mongo_doc._search
    condition = lambda: sum(1 for _ in search()) == NUMBER_OF_DOC_DIRS
    wait_for(condition)
    primary_conn = Connection(HOSTNAME, int(PORTS_ONE['PRIMARY']))
    kill_mongo_proc(HOSTNAME, PORTS_ONE['PRIMARY'])

    new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))
    admin = new_primary_conn['admin']
    # Wait for the secondary to be promoted to primary.
    wait_for(lambda: admin.command("isMaster")['ismaster'])
    time.sleep(5)
    # Insert a second batch, retrying while the election settles.
    count = -1
    while count + 1 < NUMBER_OF_DOC_DIRS:
        try:
            count += 1
            self.conn['test']['test'].insert(
                {'name': 'Pauline ' + str(count)}, safe=True)
        except (OperationFailure, AutoReconnect):
            time.sleep(1)
    # Sync up against the actual Mongo count (some inserts may have
    # failed and been skipped above).
    wait_for(lambda: sum(1 for _ in self.mongo_doc._search()) ==
             self.conn['test']['test'].find().count())
    result_set_1 = self.mongo_doc._search()
    # Every 'Pauline' doc in the doc manager must match its Mongo copy.
    for item in result_set_1:
        if 'Pauline' in item['name']:
            result_set_2 = self.conn['test']['test'].find_one(
                {'name': item['name']})
            self.assertEqual(item['_id'], result_set_2['_id'])

    kill_mongo_proc(HOSTNAME, PORTS_ONE['SECONDARY'])

    # Restart both members; the old primary comes back first, rolling
    # back the 'Pauline' batch.
    start_mongo_proc(PORTS_ONE['PRIMARY'], "demo-repl", "/replset1a",
                     "/replset1a.log", None)
    db_admin = primary_conn['admin']
    wait_for(lambda: db_admin.command("isMaster")['ismaster'])
    start_mongo_proc(PORTS_ONE['SECONDARY'], "demo-repl", "/replset1b",
                     "/replset1b.log", None)

    # After rollback only the original 'Paul' docs should remain.
    search = self.mongo_doc._search
    condition = lambda: sum(1 for _ in search()) == NUMBER_OF_DOC_DIRS
    wait_for(condition)
    result_set_1 = list(self.mongo_doc._search())
    self.assertEqual(len(result_set_1), NUMBER_OF_DOC_DIRS)
    for item in result_set_1:
        self.assertTrue('Paul' in item['name'])
    find_cursor = retry_until_ok(self.conn['test']['test'].find)
    self.assertEqual(retry_until_ok(find_cursor.count),
                     NUMBER_OF_DOC_DIRS)
def test_rollback(self):
    """Tests rollback. Rollback is performed by inserting one document,
    killing primary, inserting another doc, killing secondary, and then
    restarting both.
    """
    primary_conn = Connection(HOSTNAME, int(PORTS_ONE['PRIMARY']))
    self.conn['test']['test'].insert({'name': 'paul'}, safe=True)
    # Wait until the first insert is visible in MongoDB.
    while self.conn['test']['test'].find({'name': 'paul'}).count() != 1:
        time.sleep(1)
    kill_mongo_proc(HOSTNAME, PORTS_ONE['PRIMARY'])

    new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))
    admin_db = new_primary_conn['admin']
    # Wait for the secondary to be promoted to primary.
    while admin_db.command("isMaster")['ismaster'] is False:
        time.sleep(1)
    time.sleep(5)
    # Insert a second doc, retrying while the election settles.
    count = 0
    while True:
        try:
            self.conn['test']['test'].insert({'name': 'pauline'},
                                             safe=True)
            break
        except OperationFailure:
            count += 1
            if count > 60:
                self.fail('Call to insert failed too'
                          ' many times in test_rollback')
            time.sleep(1)
            continue
    while (len(self.synchronizer._search()) != 2):
        time.sleep(1)
    result_set_1 = self.synchronizer._search()
    result_set_2 = self.conn['test']['test'].find_one({'name': 'pauline'})
    self.assertEqual(len(result_set_1), 2)
    # Make sure 'pauline' is there and carries the same _id as in Mongo.
    for item in result_set_1:
        if item['name'] == 'pauline':
            self.assertEqual(item['_id'], result_set_2['_id'])
    kill_mongo_proc(HOSTNAME, PORTS_ONE['SECONDARY'])

    # Restart both members; the old primary comes back first, which
    # rolls back the un-replicated 'pauline' write.
    start_mongo_proc(PORTS_ONE['PRIMARY'], "demo-repl", "/replset1a",
                     "/replset1a.log", None)
    while primary_conn['admin'].command("isMaster")['ismaster'] is False:
        time.sleep(1)
    start_mongo_proc(PORTS_ONE['SECONDARY'], "demo-repl", "/replset1b",
                     "/replset1b.log", None)
    time.sleep(2)
    # After rollback only the original 'paul' doc should remain.
    result_set_1 = self.synchronizer._search()
    self.assertEqual(len(result_set_1), 1)
    for item in result_set_1:
        self.assertEqual(item['name'], 'paul')
    find_cursor = retry_until_ok(self.conn['test']['test'].find)
    self.assertEqual(retry_until_ok(find_cursor.count), 1)
def setUpClass(cls):
    """Start a standalone mongod and set up the doc managers and client
    used by the tests, including a namespace-filtered DocManager.
    """
    cls.standalone_port = start_mongo_proc(
        options=['--nojournal', '--noprealloc'])
    cls.standalone_pair = '%s:%d' % (mongo_host, cls.standalone_port)
    cls.MongoDoc = DocManager(cls.standalone_pair)
    cls.mongo = MongoClient(cls.standalone_pair)['test']['test']

    cls.namespaces_inc = ["test.test_include1", "test.test_include2"]
    cls.namespaces_exc = ["test.test_exclude1", "test.test_exclude2"]
    # FIX: use 'cls' instead of hard-coding the class name; the value is
    # assigned to cls.namespaces_inc just above, so this is equivalent
    # and survives a class rename.
    cls.choosy_docman = DocManager(
        cls.standalone_pair,
        namespace_set=cls.namespaces_inc)
def setUpClass(cls):
    """Start a standalone mongod and set up the doc managers and client
    used by the tests, including a namespace-filtered DocManager.
    """
    cls.standalone_port = start_mongo_proc(
        options=['--nojournal', '--noprealloc'])
    cls.standalone_pair = '%s:%d' % (mongo_host, cls.standalone_port)
    cls.MongoDoc = DocManager(cls.standalone_pair)
    cls.mongo = MongoClient(cls.standalone_pair)['test']['test']

    cls.namespaces_inc = ["test.test_include1", "test.test_include2"]
    cls.namespaces_exc = ["test.test_exclude1", "test.test_exclude2"]
    # FIX: use 'cls' instead of hard-coding the class name; the value is
    # assigned to cls.namespaces_inc just above, so this is equivalent
    # and survives a class rename.
    cls.choosy_docman = DocManager(
        cls.standalone_pair,
        namespace_set=cls.namespaces_inc)
def setUpClass(cls):
    """Recreate an empty config.txt, launch a standalone mongod plus the
    'test-mongo' replica set, and connect a DocManager and MongoClient.
    """
    # Start from a fresh, empty config file.
    try:
        os.unlink("config.txt")
    except OSError:
        pass
    with open("config.txt", "w"):
        pass
    cls.standalone_port = start_mongo_proc(
        options=['--nojournal', '--noprealloc'])
    address = '%s:%d' % (mongo_host, cls.standalone_port)
    cls.mongo_doc = DocManager(address)
    cls.mongo_doc._remove()
    replset = start_replica_set('test-mongo')
    _, cls.secondary_p, cls.primary_p = replset
    cls.conn = MongoClient(mongo_host, cls.primary_p,
                           replicaSet='test-mongo')
def setUpClass(cls):
    """Reset config.txt, start a standalone mongod and a replica set,
    then wire up the doc manager and the replica-set client.
    """
    try:
        # Drop any stale config file left over from a previous run.
        os.unlink("config.txt")
    except OSError:
        pass
    # Create (or truncate) an empty config file.
    with open("config.txt", "w"):
        pass
    port = start_mongo_proc(options=['--nojournal', '--noprealloc'])
    cls.standalone_port = port
    cls.mongo_doc = DocManager('%s:%d' % (mongo_host, port))
    cls.mongo_doc._remove()
    _, cls.secondary_p, cls.primary_p = start_replica_set('test-mongo')
    cls.conn = MongoClient(
        mongo_host, cls.primary_p, replicaSet='test-mongo')
def test_single_target(self):
    """Test with a single replication target"""

    self.opman.start()

    # Insert first document with primary up
    self.main_conn["test"]["mc"].insert({"i": 0})
    self.assertEqual(self.primary_conn["test"]["mc"].find().count(), 1)

    # Make sure the insert is replicated
    secondary = self.secondary_conn
    self.assertTrue(wait_for(lambda: secondary["test"]["mc"].count() == 1),
                    "first write didn't replicate to secondary")

    # Kill the primary
    kill_mongo_proc("localhost", PORTS_ONE["PRIMARY"])

    # Wait for the secondary to be promoted
    while not secondary["admin"].command("isMaster")["ismaster"]:
        time.sleep(1)

    # Insert another document. This will be rolled back later
    retry_until_ok(self.main_conn["test"]["mc"].insert, {"i": 1})
    self.assertEqual(secondary["test"]["mc"].count(), 2)

    # Wait for replication to doc manager
    c = lambda: len(self.opman.doc_managers[0]._search()) == 2
    self.assertTrue(wait_for(c),
                    "not all writes were replicated to doc manager")

    # Kill the new primary
    kill_mongo_proc("localhost", PORTS_ONE["SECONDARY"])

    # Start both servers back up
    start_mongo_proc(
        port=PORTS_ONE['PRIMARY'],
        repl_set_name="demo-repl",
        data="/replset1a",
        log="/replset1a.log",
        key_file=None
    )
    primary_admin = self.primary_conn["admin"]
    while not primary_admin.command("isMaster")["ismaster"]:
        time.sleep(1)
    start_mongo_proc(
        port=PORTS_ONE['SECONDARY'],
        repl_set_name="demo-repl",
        data="/replset1b",
        log="/replset1b.log",
        key_file=None
    )
    # Wait for the secondary to report replica-set state 2 (SECONDARY)
    while secondary["admin"].command("replSetGetStatus")["myState"] != 2:
        time.sleep(1)
    while retry_until_ok(self.main_conn["test"]["mc"].find().count) == 0:
        time.sleep(1)

    # Only first document should exist in MongoDB
    self.assertEqual(self.main_conn["test"]["mc"].count(), 1)
    self.assertEqual(self.main_conn["test"]["mc"].find_one()["i"], 0)

    # Same case should hold for the doc manager
    doc_manager = self.opman.doc_managers[0]
    self.assertEqual(len(doc_manager._search()), 1)
    self.assertEqual(doc_manager._search()[0]["i"], 0)

    # cleanup
    self.opman.join()
def test_many_targets(self):
    """Test with several replication targets"""

    # OplogThread has multiple doc managers
    doc_managers = [DocManager(), DocManager(), DocManager()]
    self.opman.doc_managers = doc_managers

    self.opman.start()

    # Insert a document into each namespace
    self.main_conn["test"]["mc"].insert({"i": 0})
    self.assertEqual(self.primary_conn["test"]["mc"].count(), 1)

    # Make sure the insert is replicated
    secondary = self.secondary_conn
    self.assertTrue(wait_for(lambda: secondary["test"]["mc"].count() == 1),
                    "first write didn't replicate to secondary")

    # Kill the primary
    kill_mongo_proc("localhost", PORTS_ONE["PRIMARY"])

    # Wait for the secondary to be promoted
    while not secondary["admin"].command("isMaster")["ismaster"]:
        time.sleep(1)

    # Insert more documents. This will be rolled back later
    # Some of these documents will be manually removed from
    # certain doc managers, to emulate the effect of certain
    # target systems being ahead/behind others
    secondary_ids = []
    for i in range(1, 10):
        secondary_ids.append(
            retry_until_ok(self.main_conn["test"]["mc"].insert,
                           {"i": i}))
    self.assertEqual(self.secondary_conn["test"]["mc"].count(), 10)

    # Wait for replication to the doc managers
    def docmans_done():
        for dm in self.opman.doc_managers:
            if len(dm._search()) != 10:
                return False
        return True
    self.assertTrue(wait_for(docmans_done),
                    "not all writes were replicated to doc managers")

    # Remove some documents from the doc managers to simulate
    # uneven replication
    for id in secondary_ids[8:]:
        self.opman.doc_managers[1].remove({"_id": id})
    for id in secondary_ids[2:]:
        self.opman.doc_managers[2].remove({"_id": id})

    # Kill the new primary
    kill_mongo_proc("localhost", PORTS_ONE["SECONDARY"])

    # Start both servers back up
    start_mongo_proc(port=PORTS_ONE['PRIMARY'],
                     repl_set_name="demo-repl",
                     data="/replset1a",
                     log="/replset1a.log",
                     key_file=None)
    primary_admin = self.primary_conn["admin"]
    while not primary_admin.command("isMaster")["ismaster"]:
        time.sleep(1)
    start_mongo_proc(port=PORTS_ONE['SECONDARY'],
                     repl_set_name="demo-repl",
                     data="/replset1b",
                     log="/replset1b.log",
                     key_file=None)
    # Wait for the secondary to report replica-set state 2 (SECONDARY)
    while secondary["admin"].command("replSetGetStatus")["myState"] != 2:
        time.sleep(1)
    while retry_until_ok(
            self.primary_conn["test"]["mc"].find().count) == 0:
        time.sleep(1)

    # Only first document should exist in MongoDB
    self.assertEqual(self.primary_conn["test"]["mc"].count(), 1)
    self.assertEqual(self.primary_conn["test"]["mc"].find_one()["i"], 0)

    # Same case should hold for the doc managers
    for dm in self.opman.doc_managers:
        self.assertEqual(len(dm._search()), 1)
        self.assertEqual(dm._search()[0]["i"], 0)

    self.opman.join()
def test_stressed_rollback(self):
    """Test stressed rollback with number of documents equal to specified
    in global variable. The rollback is performed the same way as before
    but with more docs.
    """
    self.conn['test']['test'].remove()
    # Wait for Solr to drain, then insert the full 'Paul' batch.
    while len(self.solr_conn.search('*:*', rows=NUMBER_OF_DOC_DIRS)) != 0:
        time.sleep(1)
    for i in range(0, NUMBER_OF_DOC_DIRS):
        self.conn['test']['test'].insert({'name': 'Paul ' + str(i)},
                                         safe=True)

    while (len(self.solr_conn.search('*:*', rows=NUMBER_OF_DOC_DIRS))
           != NUMBER_OF_DOC_DIRS):
        time.sleep(1)
    primary_conn = Connection(HOSTNAME, int(PORTS_ONE['PRIMARY']))
    kill_mongo_proc(HOSTNAME, PORTS_ONE['PRIMARY'])

    new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))
    admin_db = new_primary_conn['admin']
    # Wait for the secondary to be promoted to primary.
    while admin_db.command("isMaster")['ismaster'] is False:
        time.sleep(1)
    time.sleep(5)
    # Insert the 'Pauline' batch, retrying while the election settles.
    count = -1
    while count + 1 < NUMBER_OF_DOC_DIRS:
        try:
            count += 1
            self.conn['test']['test'].insert(
                {'name': 'Pauline ' + str(count)}, safe=True)
        except (OperationFailure, AutoReconnect):
            time.sleep(1)
    # Sync Solr against the actual Mongo count (some inserts may have
    # failed and been skipped above).
    while (len(self.solr_conn.search('*:*', rows=NUMBER_OF_DOC_DIRS * 2))
           != self.conn['test']['test'].find().count()):
        time.sleep(1)
    result_set_1 = self.solr_conn.search('Pauline',
                                         rows=NUMBER_OF_DOC_DIRS * 2,
                                         sort='_id asc')
    # Every 'Pauline' doc in Solr must match its Mongo copy; Solr holds
    # _id as a string, so compare against str(ObjectId).
    for item in result_set_1:
        result_set_2 = self.conn['test']['test'].find_one(
            {'name': item['name']})
        self.assertEqual(item['_id'], str(result_set_2['_id']))

    kill_mongo_proc(HOSTNAME, PORTS_ONE['SECONDARY'])

    # Restart both members; the old primary comes back first, rolling
    # back the 'Pauline' batch.
    start_mongo_proc(PORTS_ONE['PRIMARY'], "demo-repl", "/replset1a",
                     "/replset1a.log", None)
    while primary_conn['admin'].command("isMaster")['ismaster'] is False:
        time.sleep(1)
    start_mongo_proc(PORTS_ONE['SECONDARY'], "demo-repl", "/replset1b",
                     "/replset1b.log", None)

    # Wait until every 'Pauline' doc has been removed from Solr.
    while (len(self.solr_conn.search('Pauline',
                                     rows=NUMBER_OF_DOC_DIRS * 2)) != 0):
        time.sleep(15)
    result_set_1 = self.solr_conn.search('Pauline',
                                         rows=NUMBER_OF_DOC_DIRS * 2)
    self.assertEqual(len(result_set_1), 0)
    result_set_2 = self.solr_conn.search('Paul',
                                         rows=NUMBER_OF_DOC_DIRS * 2)
    self.assertEqual(len(result_set_2), NUMBER_OF_DOC_DIRS)
def test_many_targets(self):
    """Test with several replication targets"""

    # OplogThread has multiple doc managers
    doc_managers = [DocManager(), DocManager(), DocManager()]
    self.opman.doc_managers = doc_managers

    self.opman.start()

    # Insert a document into each namespace
    self.main_conn["test"]["mc"].insert({"i": 0})
    self.assertEqual(self.primary_conn["test"]["mc"].count(), 1)

    # Make sure the insert is replicated
    secondary = self.secondary_conn
    self.assertTrue(wait_for(lambda: secondary["test"]["mc"].count() == 1),
                    "first write didn't replicate to secondary")

    # Kill the primary
    kill_mongo_proc("localhost", PORTS_ONE["PRIMARY"])

    # Wait for the secondary to be promoted
    while not secondary["admin"].command("isMaster")["ismaster"]:
        time.sleep(1)

    # Insert more documents. This will be rolled back later
    # Some of these documents will be manually removed from
    # certain doc managers, to emulate the effect of certain
    # target systems being ahead/behind others
    secondary_ids = []
    for i in range(1, 10):
        secondary_ids.append(
            retry_until_ok(self.main_conn["test"]["mc"].insert,
                           {"i": i}))
    self.assertEqual(self.secondary_conn["test"]["mc"].count(), 10)

    # Wait for replication to the doc managers
    def docmans_done():
        for dm in self.opman.doc_managers:
            if len(dm._search()) != 10:
                return False
        return True
    self.assertTrue(wait_for(docmans_done),
                    "not all writes were replicated to doc managers")

    # Remove some documents from the doc managers to simulate
    # uneven replication
    for id in secondary_ids[8:]:
        self.opman.doc_managers[1].remove({"_id": id})
    for id in secondary_ids[2:]:
        self.opman.doc_managers[2].remove({"_id": id})

    # Kill the new primary
    kill_mongo_proc("localhost", PORTS_ONE["SECONDARY"])

    # Start both servers back up
    start_mongo_proc(
        port=PORTS_ONE['PRIMARY'],
        repl_set_name="demo-repl",
        data="/replset1a",
        log="/replset1a.log",
        key_file=None
    )
    primary_admin = self.primary_conn["admin"]
    while not primary_admin.command("isMaster")["ismaster"]:
        time.sleep(1)
    start_mongo_proc(
        port=PORTS_ONE['SECONDARY'],
        repl_set_name="demo-repl",
        data="/replset1b",
        log="/replset1b.log",
        key_file=None
    )
    # Wait for the secondary to report replica-set state 2 (SECONDARY)
    while secondary["admin"].command("replSetGetStatus")["myState"] != 2:
        time.sleep(1)
    while retry_until_ok(self.primary_conn["test"]["mc"].find().count) == 0:
        time.sleep(1)

    # Only first document should exist in MongoDB
    self.assertEqual(self.primary_conn["test"]["mc"].count(), 1)
    self.assertEqual(self.primary_conn["test"]["mc"].find_one()["i"], 0)

    # Same case should hold for the doc managers
    for dm in self.opman.doc_managers:
        self.assertEqual(len(dm._search()), 1)
        self.assertEqual(dm._search()[0]["i"], 0)

    self.opman.join()
def test_rollback(self):
    """Test rollback in oplog_manager. Assertion failure if it doesn't pass
    We force a rollback by inserting a doc, killing primary, inserting
    another doc, killing the new primary, and then restarting both
    servers.
    """
    os.system('rm %s; touch %s' % (CONFIG, CONFIG))
    if not start_cluster(sharded=True):
        self.fail("Shards cannot be added to mongos")
    test_oplog, primary_conn, solr, mongos = self.get_new_oplog()

    solr = DocManager()
    test_oplog.doc_manager = solr
    solr._delete()          # equivalent to solr.delete(q='*:*')

    safe_mongo_op(mongos['alpha']['foo'].remove, {})
    safe_mongo_op(mongos['alpha']['foo'].insert,
                  {'_id': ObjectId('4ff74db3f646462b38000001'),
                   'name': 'paulie'})
    cutoff_ts = test_oplog.get_last_oplog_timestamp()

    obj2 = ObjectId('4ff74db3f646462b38000002')
    # Expected doc-manager entry for the pre-rollback document.
    first_doc = {'name': 'paulie', '_ts': bson_ts_to_long(cutoff_ts),
                 'ns': 'alpha.foo',
                 '_id': ObjectId('4ff74db3f646462b38000001')}

    # try kill one, try restarting
    kill_mongo_proc(primary_conn.host, PORTS_ONE['PRIMARY'])

    new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))
    admin_db = new_primary_conn['admin']
    # Wait for the secondary to be promoted to primary.
    while admin_db.command("isMaster")['ismaster'] is False:
        time.sleep(1)
    time.sleep(5)
    # Insert a second doc, retrying while the election settles.
    count = 0
    while True:
        try:
            mongos['alpha']['foo'].insert({'_id': obj2, 'name': 'paul'})
            break
        except OperationFailure:
            time.sleep(1)
            count += 1
            if count > 60:
                self.fail('Insert failed too many times in rollback')
            continue

    kill_mongo_proc(primary_conn.host, PORTS_ONE['SECONDARY'])

    start_mongo_proc(PORTS_ONE['PRIMARY'], "demo-repl", "/replset1a",
                     "/replset1a.log", None)

    # wait for master to be established
    while primary_conn['admin'].command("isMaster")['ismaster'] is False:
        time.sleep(1)

    start_mongo_proc(PORTS_ONE['SECONDARY'], "demo-repl", "/replset1b",
                     "/replset1b.log", None)

    # wait for secondary to be established (myState == 2)
    admin_db = new_primary_conn['admin']
    while admin_db.command("replSetGetStatus")['myState'] != 2:
        time.sleep(1)
    while retry_until_ok(mongos['alpha']['foo'].find().count) != 1:
        time.sleep(1)

    # Both members should be back on their original ports.
    self.assertEqual(str(new_primary_conn.port), PORTS_ONE['SECONDARY'])
    self.assertEqual(str(primary_conn.port), PORTS_ONE['PRIMARY'])

    last_ts = test_oplog.get_last_oplog_timestamp()
    # Doc-manager entry for the rolled-back document.
    second_doc = {'name': 'paul', '_ts': bson_ts_to_long(last_ts),
                  'ns': 'alpha.foo', '_id': obj2}

    # Seed the doc manager with both docs, then run the rollback; only
    # the document at/before the cutoff timestamp should survive.
    test_oplog.doc_manager.upsert(first_doc)
    test_oplog.doc_manager.upsert(second_doc)
    test_oplog.rollback()
    test_oplog.doc_manager.commit()
    results = solr._search()

    self.assertEqual(len(results), 1)

    results_doc = results[0]
    self.assertEqual(results_doc['name'], 'paulie')
    self.assertTrue(results_doc['_ts'] <= bson_ts_to_long(cutoff_ts))
def test_stressed_rollback(self):
    """Test stressed rollback with number of documents equal to specified
    in global variable. The rollback is performed the same way as before
    but with more docs.
    """
    self.conn['test']['test'].remove()
    # Wait for Solr to drain, then insert the full 'Paul' batch.
    while len(self.solr_conn.search('*:*', rows=NUMBER_OF_DOC_DIRS)) != 0:
        time.sleep(1)
    for i in range(0, NUMBER_OF_DOC_DIRS):
        self.conn['test']['test'].insert(
            {'name': 'Paul ' + str(i)}, safe=True)

    while (len(self.solr_conn.search('*:*', rows=NUMBER_OF_DOC_DIRS))
           != NUMBER_OF_DOC_DIRS):
        time.sleep(1)
    primary_conn = Connection(HOSTNAME, int(PORTS_ONE['PRIMARY']))
    kill_mongo_proc(HOSTNAME, PORTS_ONE['PRIMARY'])

    new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))
    admin_db = new_primary_conn['admin']
    # Wait for the secondary to be promoted to primary.
    while admin_db.command("isMaster")['ismaster'] is False:
        time.sleep(1)
    time.sleep(5)
    # Insert the 'Pauline' batch, retrying while the election settles.
    count = -1
    while count + 1 < NUMBER_OF_DOC_DIRS:
        try:
            count += 1
            self.conn['test']['test'].insert(
                {'name': 'Pauline ' + str(count)}, safe=True)
        except (OperationFailure, AutoReconnect):
            time.sleep(1)
    # Sync Solr against the actual Mongo count (some inserts may have
    # failed and been skipped above).
    while (len(self.solr_conn.search('*:*', rows=NUMBER_OF_DOC_DIRS * 2))
           != self.conn['test']['test'].find().count()):
        time.sleep(1)
    result_set_1 = self.solr_conn.search('Pauline',
                                         rows=NUMBER_OF_DOC_DIRS * 2,
                                         sort='_id asc')
    # Every 'Pauline' doc in Solr must match its Mongo copy; Solr holds
    # _id as a string, so compare against str(ObjectId).
    for item in result_set_1:
        result_set_2 = self.conn['test']['test'].find_one(
            {'name': item['name']})
        self.assertEqual(item['_id'], str(result_set_2['_id']))

    kill_mongo_proc(HOSTNAME, PORTS_ONE['SECONDARY'])

    # Restart both members; the old primary comes back first, rolling
    # back the 'Pauline' batch.
    start_mongo_proc(PORTS_ONE['PRIMARY'], "demo-repl", "/replset1a",
                     "/replset1a.log", None)
    while primary_conn['admin'].command("isMaster")['ismaster'] is False:
        time.sleep(1)
    start_mongo_proc(PORTS_ONE['SECONDARY'], "demo-repl", "/replset1b",
                     "/replset1b.log", None)

    # Wait until every 'Pauline' doc has been removed from Solr.
    while (len(self.solr_conn.search('Pauline',
                                     rows=NUMBER_OF_DOC_DIRS * 2)) != 0):
        time.sleep(15)
    result_set_1 = self.solr_conn.search('Pauline',
                                         rows=NUMBER_OF_DOC_DIRS * 2)
    self.assertEqual(len(result_set_1), 0)
    result_set_2 = self.solr_conn.search('Paul',
                                         rows=NUMBER_OF_DOC_DIRS * 2)
    self.assertEqual(len(result_set_2), NUMBER_OF_DOC_DIRS)
def test_rollback(self):
    """Test rollback in oplog_manager against a plain replica set.

    We force a rollback by inserting a doc, killing the primary,
    inserting another doc against the promoted secondary, killing
    that new primary, and then restarting both. Only the first doc
    should survive in the doc manager after the rollback.
    """
    os.system('rm config.txt; touch config.txt')
    test_oplog, primary_conn, mongos, solr = self.get_new_oplog()

    if not start_cluster():
        self.fail('Cluster could not be started successfully!')

    solr = DocManager()
    test_oplog.doc_manager = solr
    solr._delete()  # equivalent to solr.delete(q='*: *')

    mongos['test']['test'].remove({})
    mongos['test']['test'].insert(
        {'_id': ObjectId('4ff74db3f646462b38000001'), 'name': 'paulie'},
        safe=True)
    while (mongos['test']['test'].find().count() != 1):
        time.sleep(1)

    # Timestamp of the last op that must survive the rollback.
    cutoff_ts = test_oplog.get_last_oplog_timestamp()
    first_doc = {'name': 'paulie', '_ts': bson_ts_to_long(cutoff_ts),
                 'ns': 'test.test',
                 '_id': ObjectId('4ff74db3f646462b38000001')}

    # try kill one, try restarting
    kill_mongo_proc(primary_conn.host, PORTS_ONE['PRIMARY'])

    # Wait for the secondary to be promoted.
    new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))
    admin = new_primary_conn['admin']
    while admin.command("isMaster")['ismaster'] is False:
        time.sleep(1)
    time.sleep(5)

    # Insert the doc that will later be rolled back; retry for up to a
    # minute while the election settles.
    count = 0
    while True:
        try:
            mongos['test']['test'].insert(
                {'_id': ObjectId('4ff74db3f646462b38000002'),
                 'name': 'paul'}, safe=True)
            break
        except OperationFailure:
            count += 1
            if count > 60:
                self.fail('Call to insert doc failed too many times')
            time.sleep(1)
            continue
    while (mongos['test']['test'].find().count() != 2):
        time.sleep(1)

    # Kill the new primary, then restart both; the original primary
    # returns first, so the second insert gets rolled back.
    kill_mongo_proc(primary_conn.host, PORTS_ONE['SECONDARY'])
    start_mongo_proc(PORTS_ONE['PRIMARY'], "demo-repl", "/replset1a",
                     "/replset1a.log", None)

    # wait for master to be established
    while primary_conn['admin'].command("isMaster")['ismaster'] is False:
        time.sleep(1)

    start_mongo_proc(PORTS_ONE['SECONDARY'], "demo-repl", "/replset1b",
                     "/replset1b.log", None)

    # wait for secondary to be established (myState == 2 is SECONDARY)
    admin = new_primary_conn['admin']
    while admin.command("replSetGetStatus")['myState'] != 2:
        time.sleep(1)
    while retry_until_ok(mongos['test']['test'].find().count) != 1:
        time.sleep(1)

    self.assertEqual(str(new_primary_conn.port), PORTS_ONE['SECONDARY'])
    self.assertEqual(str(primary_conn.port), PORTS_ONE['PRIMARY'])

    last_ts = test_oplog.get_last_oplog_timestamp()
    second_doc = {'name': 'paul', '_ts': bson_ts_to_long(last_ts),
                  'ns': 'test.test',
                  '_id': ObjectId('4ff74db3f646462b38000002')}

    # Seed the doc manager with both docs, then roll back.
    test_oplog.doc_manager.upsert(first_doc)
    test_oplog.doc_manager.upsert(second_doc)
    test_oplog.rollback()
    test_oplog.doc_manager.commit()
    results = solr._search()

    # BUGFIX: was a bare `assert(...)`, which is stripped under `-O`
    # and inconsistent with the unittest assertions used elsewhere.
    self.assertEqual(len(results), 1)

    self.assertEqual(results[0]['name'], 'paulie')
    self.assertTrue(results[0]['_ts'] <= bson_ts_to_long(cutoff_ts))
def test_rollback(self):
    """Test rollback in oplog_manager. Assertion failure if it doesn't pass
        We force a rollback by inserting a doc, killing primary,
        inserting another doc, killing the new primary, and then
        restarting both servers.

    Sharded variant: writes go through mongos into the 'alpha.foo'
    namespace rather than directly into a replica set.
    """
    # Reset the persisted config file before starting the cluster.
    os.system('rm %s; touch %s' % (CONFIG, CONFIG))
    if not start_cluster(sharded=True):
        self.fail("Shards cannot be added to mongos")

    test_oplog, primary_conn, solr, mongos = self.get_new_oplog()

    # Replace the real target with a fresh in-test DocManager.
    solr = DocManager()
    test_oplog.doc_manager = solr
    solr._delete()  # equivalent to solr.delete(q='*:*')

    # Seed exactly one doc; this one must survive the rollback.
    safe_mongo_op(mongos['alpha']['foo'].remove, {})
    safe_mongo_op(mongos['alpha']['foo'].insert,
                  {'_id': ObjectId('4ff74db3f646462b38000001'),
                   'name': 'paulie'})
    # Timestamp of the last op that should remain after rollback.
    cutoff_ts = test_oplog.get_last_oplog_timestamp()

    obj2 = ObjectId('4ff74db3f646462b38000002')
    first_doc = {'name': 'paulie', '_ts': bson_ts_to_long(cutoff_ts),
                 'ns': 'alpha.foo',
                 '_id': ObjectId('4ff74db3f646462b38000001')}

    # try kill one, try restarting
    kill_mongo_proc(primary_conn.host, PORTS_ONE['PRIMARY'])

    # Wait for the secondary to be elected primary.
    new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))
    admin_db = new_primary_conn['admin']
    while admin_db.command("isMaster")['ismaster'] is False:
        time.sleep(1)
    time.sleep(5)

    # Insert the doc that will be rolled back; retry up to ~60s while
    # the election settles.
    # NOTE(review): only OperationFailure is caught here, while the
    # stressed-rollback test also catches AutoReconnect — confirm the
    # driver cannot raise AutoReconnect at this point.
    count = 0
    while True:
        try:
            mongos['alpha']['foo'].insert({'_id': obj2, 'name': 'paul'})
            break
        except OperationFailure:
            time.sleep(1)
            count += 1
            if count > 60:
                self.fail('Insert failed too many times in rollback')
            continue

    # Kill the new primary, then restart both nodes; the original
    # primary comes back first, forcing the second insert to roll back.
    kill_mongo_proc(primary_conn.host, PORTS_ONE['SECONDARY'])

    start_mongo_proc(PORTS_ONE['PRIMARY'], "demo-repl", "/replset1a",
                     "/replset1a.log", None)

    # wait for master to be established
    while primary_conn['admin'].command("isMaster")['ismaster'] is False:
        time.sleep(1)

    start_mongo_proc(PORTS_ONE['SECONDARY'], "demo-repl", "/replset1b",
                     "/replset1b.log", None)

    # wait for secondary to be established (myState == 2 is SECONDARY)
    admin_db = new_primary_conn['admin']
    while admin_db.command("replSetGetStatus")['myState'] != 2:
        time.sleep(1)

    # Mongo should converge back to the single surviving doc.
    while retry_until_ok(mongos['alpha']['foo'].find().count) != 1:
        time.sleep(1)

    # Sanity-check that roles ended up where the test assumes.
    self.assertEqual(str(new_primary_conn.port), PORTS_ONE['SECONDARY'])
    self.assertEqual(str(primary_conn.port), PORTS_ONE['PRIMARY'])

    last_ts = test_oplog.get_last_oplog_timestamp()
    second_doc = {'name': 'paul', '_ts': bson_ts_to_long(last_ts),
                  'ns': 'alpha.foo', '_id': obj2}

    # Seed the doc manager with both docs, then perform the rollback:
    # only the pre-cutoff doc should remain.
    test_oplog.doc_manager.upsert(first_doc)
    test_oplog.doc_manager.upsert(second_doc)
    test_oplog.rollback()
    test_oplog.doc_manager.commit()
    results = solr._search()

    self.assertEqual(len(results), 1)

    results_doc = results[0]
    self.assertEqual(results_doc['name'], 'paulie')
    self.assertTrue(results_doc['_ts'] <= bson_ts_to_long(cutoff_ts))
def test_single_target(self):
    """Test with a single replication target"""
    self.opman.start()

    # Write one doc while the original primary is still up; it should
    # survive the whole kill/restart cycle.
    self.main_conn["test"]["mc"].insert({"i": 0})
    self.assertEqual(self.primary_conn["test"]["mc"].find().count(), 1)

    # Confirm the write reaches the secondary.
    sec_conn = self.secondary_conn

    def first_doc_replicated():
        return sec_conn["test"]["mc"].count() == 1

    self.assertTrue(wait_for(first_doc_replicated),
                    "first write didn't replicate to secondary")

    # Take down the primary and wait until the secondary is promoted.
    kill_mongo_proc("localhost", PORTS_ONE["PRIMARY"])
    while not sec_conn["admin"].command("isMaster")["ismaster"]:
        time.sleep(1)

    # Second doc goes to the promoted node only — it will be rolled back.
    retry_until_ok(self.main_conn["test"]["mc"].insert, {"i": 1})
    self.assertEqual(sec_conn["test"]["mc"].count(), 2)

    # Both docs must land in the doc manager before we proceed.
    def both_docs_in_dm():
        return len(self.opman.doc_managers[0]._search()) == 2

    self.assertTrue(wait_for(both_docs_in_dm),
                    "not all writes were replicated to doc manager")

    # Now kill the promoted node as well.
    kill_mongo_proc("localhost", PORTS_ONE["SECONDARY"])

    # Bring the original primary back first so its view wins ...
    start_mongo_proc(port=PORTS_ONE['PRIMARY'], repl_set_name="demo-repl",
                     data="/replset1a", log="/replset1a.log", key_file=None)
    admin_db = self.primary_conn["admin"]
    while not admin_db.command("isMaster")["ismaster"]:
        time.sleep(1)

    # ... then restart the other node and wait for it to rejoin as
    # secondary (myState == 2).
    start_mongo_proc(port=PORTS_ONE['SECONDARY'], repl_set_name="demo-repl",
                     data="/replset1b", log="/replset1b.log", key_file=None)
    while sec_conn["admin"].command("replSetGetStatus")["myState"] != 2:
        time.sleep(1)
    while retry_until_ok(self.main_conn["test"]["mc"].find().count) == 0:
        time.sleep(1)

    # After the rollback, MongoDB holds only the first document.
    self.assertEqual(self.main_conn["test"]["mc"].count(), 1)
    self.assertEqual(self.main_conn["test"]["mc"].find_one()["i"], 0)

    # The doc manager must agree with MongoDB.
    dm = self.opman.doc_managers[0]
    self.assertEqual(len(dm._search()), 1)
    self.assertEqual(dm._search()[0]["i"], 0)

    # cleanup
    self.opman.join()