def test_init_cursor(self):
    """Test init_cursor in oplog_manager.

    Assertion failure if it doesn't pass.
    """
    test_oplog, primary_conn, search_ts = self.get_oplog_thread()
    test_oplog.checkpoint = None  # needed for these tests

    # initial tests with no config file and empty oplog
    self.assertEqual(test_oplog.init_cursor(), None)

    # no config, single oplog entry
    primary_conn["test"]["test"].insert({"name": "paulie"})
    search_ts = test_oplog.get_last_oplog_timestamp()
    cursor = test_oplog.init_cursor()
    self.assertEqual(cursor.count(), 1)
    self.assertEqual(test_oplog.checkpoint, search_ts)

    # with config file, assert that size != 0
    os.system("touch temp_config.txt")
    cursor = test_oplog.init_cursor()
    oplog_dict = test_oplog.oplog_progress.get_dict()
    self.assertEqual(cursor.count(), 1)
    self.assertTrue(str(test_oplog.oplog) in oplog_dict)
    self.assertEqual(oplog_dict[str(test_oplog.oplog)], test_oplog.checkpoint)
    os.system("rm temp_config.txt")

    # test init_cursor when OplogThread created with/without no-dump option
    # insert some documents (will need to be dumped)
    primary_conn["test"]["test"].remove()
    primary_conn["test"]["test"].insert(({"_id": i} for i in range(100)))

    # test no-dump option
    docman = DocManager()
    docman._delete()
    test_oplog.doc_manager = docman
    test_oplog.collection_dump = False
    test_oplog.oplog_progress = LockingDict()
    # init_cursor has the side-effect of causing a collection dump
    test_oplog.init_cursor()
    self.assertEqual(len(docman._search()), 0)

    # test without the no-dump option
    docman._delete()
    test_oplog.collection_dump = True
    test_oplog.oplog_progress = LockingDict()
    test_oplog.init_cursor()
    self.assertEqual(len(docman._search()), 100)
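
# --- sketch: in-memory document manager ------------------------------------
# The tests in this section drive a DocManager() backend simulator through
# upsert(), commit(), _delete(), and _search(). A minimal in-memory sketch of
# such a test double is given below, assuming it only needs to store documents
# keyed by '_id' and return them all on _search(); the simulator actually used
# by these tests may expose additional methods or a different constructor.

class InMemoryDocManager(object):
    """Sketch of an in-memory document manager for tests (assumed interface)."""

    def __init__(self):
        self.docs = {}

    def upsert(self, doc):
        # store or replace the document by its '_id'
        self.docs[doc['_id']] = doc

    def commit(self):
        # nothing to flush for an in-memory store
        pass

    def _delete(self):
        # drop everything, equivalent to delete(q='*:*') on Solr
        self.docs = {}

    def _search(self):
        # return all stored documents
        return list(self.docs.values())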
def test_rollback(self):
    """Test rollback in oplog_manager.

    Assertion failure if it doesn't pass. We force a rollback by inserting
    a doc, killing the primary, inserting another doc, killing the new
    primary, and then restarting both servers.
    """
    os.system('rm %s; touch %s' % (CONFIG, CONFIG))
    if not start_cluster(sharded=True):
        self.fail("Shards cannot be added to mongos")
    test_oplog, primary_conn, solr, mongos = self.get_new_oplog()

    solr = DocManager()
    test_oplog.doc_manager = solr
    solr._delete()  # equivalent to solr.delete(q='*:*')

    safe_mongo_op(mongos['alpha']['foo'].remove, {})
    safe_mongo_op(mongos['alpha']['foo'].insert,
                  {'_id': ObjectId('4ff74db3f646462b38000001'),
                   'name': 'paulie'})
    cutoff_ts = test_oplog.get_last_oplog_timestamp()

    obj2 = ObjectId('4ff74db3f646462b38000002')
    first_doc = {'name': 'paulie', '_ts': bson_ts_to_long(cutoff_ts),
                 'ns': 'alpha.foo',
                 '_id': ObjectId('4ff74db3f646462b38000001')}

    # try kill one, try restarting
    kill_mongo_proc(primary_conn.host, PORTS_ONE['PRIMARY'])

    new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))
    admin_db = new_primary_conn['admin']
    while admin_db.command("isMaster")['ismaster'] is False:
        time.sleep(1)
    time.sleep(5)
    count = 0
    while True:
        try:
            mongos['alpha']['foo'].insert({'_id': obj2, 'name': 'paul'})
            break
        except OperationFailure:
            time.sleep(1)
            count += 1
            if count > 60:
                self.fail('Insert failed too many times in rollback')
            continue

    kill_mongo_proc(primary_conn.host, PORTS_ONE['SECONDARY'])

    start_mongo_proc(PORTS_ONE['PRIMARY'], "demo-repl", "/replset1a",
                     "/replset1a.log", None)

    # wait for master to be established
    while primary_conn['admin'].command("isMaster")['ismaster'] is False:
        time.sleep(1)

    start_mongo_proc(PORTS_ONE['SECONDARY'], "demo-repl", "/replset1b",
                     "/replset1b.log", None)

    # wait for secondary to be established
    admin_db = new_primary_conn['admin']
    while admin_db.command("replSetGetStatus")['myState'] != 2:
        time.sleep(1)

    while retry_until_ok(mongos['alpha']['foo'].find().count) != 1:
        time.sleep(1)

    self.assertEqual(str(new_primary_conn.port), PORTS_ONE['SECONDARY'])
    self.assertEqual(str(primary_conn.port), PORTS_ONE['PRIMARY'])

    last_ts = test_oplog.get_last_oplog_timestamp()
    second_doc = {'name': 'paul', '_ts': bson_ts_to_long(last_ts),
                  'ns': 'alpha.foo', '_id': obj2}

    test_oplog.doc_manager.upsert(first_doc)
    test_oplog.doc_manager.upsert(second_doc)
    test_oplog.rollback()
    test_oplog.doc_manager.commit()
    results = solr._search()

    self.assertEqual(len(results), 1)

    results_doc = results[0]
    self.assertEqual(results_doc['name'], 'paulie')
    self.assertTrue(results_doc['_ts'] <= bson_ts_to_long(cutoff_ts))
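
# --- sketch: retry helper ---------------------------------------------------
# The sharded test_rollback above calls a safe_mongo_op() helper that is not
# defined in this section. A minimal sketch is given below, assuming it simply
# retries the wrapped pymongo call until the cluster is reachable again; the
# real helper in the test utilities may differ in signature and in which
# exceptions it handles.

import time

from pymongo.errors import AutoReconnect, OperationFailure


def safe_mongo_op(func, *args, **kwargs):
    """Retry a pymongo operation until it succeeds (assumed behavior)."""
    while True:
        try:
            return func(*args, **kwargs)
        except (AutoReconnect, OperationFailure):
            # the primary may be stepping down or an election may be in
            # progress, so back off briefly and retry
            time.sleep(1)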
def test_rollback(self):
    """Test rollback in oplog_manager.

    Assertion failure if it doesn't pass. We force a rollback by inserting
    a doc, killing the primary, inserting another doc, killing the new
    primary, and then restarting both.
    """
    os.system('rm config.txt; touch config.txt')
    test_oplog, primary_conn, mongos, solr = self.get_new_oplog()
    if not start_cluster():
        self.fail('Cluster could not be started successfully!')

    solr = DocManager()
    test_oplog.doc_manager = solr
    solr._delete()  # equivalent to solr.delete(q='*:*')

    mongos['test']['test'].remove({})
    mongos['test']['test'].insert(
        {'_id': ObjectId('4ff74db3f646462b38000001'),
         'name': 'paulie'},
        safe=True
    )
    while mongos['test']['test'].find().count() != 1:
        time.sleep(1)
    cutoff_ts = test_oplog.get_last_oplog_timestamp()

    first_doc = {'name': 'paulie', '_ts': bson_ts_to_long(cutoff_ts),
                 'ns': 'test.test',
                 '_id': ObjectId('4ff74db3f646462b38000001')}

    # try kill one, try restarting
    kill_mongo_proc(primary_conn.host, PORTS_ONE['PRIMARY'])

    new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))
    admin = new_primary_conn['admin']
    while admin.command("isMaster")['ismaster'] is False:
        time.sleep(1)
    time.sleep(5)
    count = 0
    while True:
        try:
            mongos['test']['test'].insert(
                {'_id': ObjectId('4ff74db3f646462b38000002'),
                 'name': 'paul'},
                safe=True)
            break
        except OperationFailure:
            count += 1
            if count > 60:
                self.fail('Call to insert doc failed too many times')
            time.sleep(1)
            continue
    while mongos['test']['test'].find().count() != 2:
        time.sleep(1)
    kill_mongo_proc(primary_conn.host, PORTS_ONE['SECONDARY'])

    start_mongo_proc(PORTS_ONE['PRIMARY'], "demo-repl", "/replset1a",
                     "/replset1a.log", None)

    # wait for master to be established
    while primary_conn['admin'].command("isMaster")['ismaster'] is False:
        time.sleep(1)

    start_mongo_proc(PORTS_ONE['SECONDARY'], "demo-repl", "/replset1b",
                     "/replset1b.log", None)

    # wait for secondary to be established
    admin = new_primary_conn['admin']
    while admin.command("replSetGetStatus")['myState'] != 2:
        time.sleep(1)

    while retry_until_ok(mongos['test']['test'].find().count) != 1:
        time.sleep(1)

    self.assertEqual(str(new_primary_conn.port), PORTS_ONE['SECONDARY'])
    self.assertEqual(str(primary_conn.port), PORTS_ONE['PRIMARY'])

    last_ts = test_oplog.get_last_oplog_timestamp()
    second_doc = {'name': 'paul', '_ts': bson_ts_to_long(last_ts),
                  'ns': 'test.test',
                  '_id': ObjectId('4ff74db3f646462b38000002')}

    test_oplog.doc_manager.upsert(first_doc)
    test_oplog.doc_manager.upsert(second_doc)

    test_oplog.rollback()
    test_oplog.doc_manager.commit()
    results = solr._search()

    self.assertEqual(len(results), 1)
    self.assertEqual(results[0]['name'], 'paulie')
    self.assertTrue(results[0]['_ts'] <= bson_ts_to_long(cutoff_ts))