def setUp(self):
    """Spin up a fresh Connector (mongo doc manager) before each test.

    Fails outright when the cluster fixture could not be started, then
    blocks until the connector sees a shard and the target instance is
    empty, so every test starts from a clean slate.
    """
    if not self.flag:
        self.fail("Shards cannot be added to mongos")
    self.connector = Connector(
        "%s:%s" % (HOSTNAME, PORTS_ONE["MONGOS"]),
        CONFIG,
        '%s:30000' % (HOSTNAME),
        ['test.test'],
        '_id',
        None,
        'mongo_connector/doc_managers/mongo_doc_manager.py')
    self.connector.start()
    # Busy-wait until the connector has registered at least one shard.
    while not self.connector.shard_set:
        pass
    self.conn['test']['test'].remove(safe=True)
    # Block until the target doc manager reports an empty collection.
    while self.mongo_doc._search():
        time.sleep(1)
def setUp(self):
    """Start a brand-new connector (Elastic doc manager) for every test."""
    if not self.flag:
        self.fail("Shards cannot be added to mongos")
    self.connector = Connector(
        '%s:%s' % (HOSTNAME, PORTS_ONE['MONGOS']),
        CONFIG,
        'http://localhost:9200',
        ['test.test'],
        '_id',
        None,
        'mongo_connector/doc_managers/elastic_doc_manager.py')
    self.connector.start()
    # Spin until at least one shard has been discovered.
    while not self.connector.shard_set:
        pass
    self.conn['test']['test'].remove(safe=True)
    # Wait for the Elastic index to drain before running the test.
    while self.elastic_doc._search():
        time.sleep(1)
def setUpClass(cls):  # noqa
    """Creates a kafka and mongo cluster instance shared by all tests."""
    cls.zk = ZookeeperFixture.instance()
    cls.server = KafkaFixture.instance(0, cls.zk.host, cls.zk.port)
    cls.client = KafkaClient(cls.server.host, cls.server.port)
    cls.max_insertions = 100
    cls.flag = start_cluster()
    # Clear our config file.
    # BUG FIX: the original wrote ``open(config, 'w').close`` without
    # parentheses, so close() was never called and the handle leaked.
    config = os.environ.get('CONFIG', "config.txt")
    open(config, 'w').close()
    if cls.flag:
        try:
            cls.mongo_conn = Connection("%s:27217" % (socket.gethostname()))
            cls.mongo_db = cls.mongo_conn['test']['test']
        except ConnectionFailure:
            print("Cannot connect locally!")
            cls.flag = False
    if cls.flag:
        cls.mongo_db = cls.mongo_conn['test']['test']
        cls.conn = Connector(
            ("%s:27217" % (socket.gethostname())),
            config,
            "%s:%s" % (cls.server.host, cls.server.port),
            ['test.test'],
            '_id',
            None,
            # NOTE(review): hard-coded absolute path will break on any
            # other machine -- consider reading this from the environment.
            "/home/tyler/doc_managers/kafka_doc_manager/kafka_doc_manager.py",
            None)
        cls.conn.start()
def test_write_oplog_progress(self):
    """Test write_oplog_progress under several circumstances."""
    # Create an empty config file (portable stdlib replacement for the
    # original ``os.system('touch ...')``).
    config_file_path = TEMP_CONFIG
    open(config_file_path, 'w').close()
    conn = Connector(MAIN_ADDRESS, config_file_path, None, ['test.test'],
                     '_id', None, None)
    # Test that None is returned if there is no config file specified.
    self.assertEqual(conn.write_oplog_progress(), None)
    # Pretend to insert a thread/timestamp pair.
    conn.oplog_progress.get_dict()[1] = Timestamp(12, 34)
    conn.write_oplog_progress()
    with open(config_file_path, 'r') as config_file:
        data = json.load(config_file)
    self.assertEqual(1, int(data[0]))
    self.assertEqual(long_to_bson_ts(int(data[1])), Timestamp(12, 34))
    # Ensure the temp file was deleted.
    self.assertFalse(os.path.exists(config_file_path + '~'))
    # Ensure that updates work properly.
    conn.oplog_progress.get_dict()[1] = Timestamp(44, 22)
    conn.write_oplog_progress()
    with open(config_file_path, 'r') as config_file:
        data = json.load(config_file)
    self.assertEqual(1, int(data[0]))
    self.assertEqual(long_to_bson_ts(int(data[1])), Timestamp(44, 22))
    # BUG FIX: the original shelled out to ``rm`` while the file handle
    # was still open, and only closed it afterwards.  The ``with`` blocks
    # above close the handle, so a plain os.remove is safe and portable.
    os.remove(config_file_path)
def test_connector(self):
    """Test whether the connector initiates properly."""
    if not self.flag:
        self.fail("Shards cannot be added to mongos")
    conn = Connector(MAIN_ADDRESS, CONFIG, None, ['test.test'],
                     '_id', None, None)
    conn.start()
    # Wait until exactly one shard has been picked up.
    while len(conn.shard_set) != 1:
        time.sleep(2)
    conn.join()
    self.assertFalse(conn.can_run)
    time.sleep(5)
    # After join(), every oplog thread must have stopped running.
    for oplog_thread in conn.shard_set.values():
        self.assertFalse(oplog_thread.running)
def setUp(self):
    """Launch a new Connector (mongo doc manager) ahead of each test."""
    if not self.flag:
        self.fail("Shards cannot be added to mongos")
    self.connector = Connector(
        "%s:%s" % (HOSTNAME, PORTS_ONE["MONGOS"]),
        CONFIG,
        '%s:30000' % (HOSTNAME),
        ['test.test'],
        '_id',
        None,
        'mongo_connector/doc_managers/mongo_doc_manager.py')
    self.connector.start()
    # Busy-wait for the first shard to appear in the shard set.
    while len(self.connector.shard_set) < 1:
        pass
    self.conn['test']['test'].remove(safe=True)
    # Give the doc manager time to drain to an empty state.
    while len(self.mongo_doc._search()) > 0:
        time.sleep(1)
def test_read_oplog_progress(self):
    """Test read_oplog_progress."""
    conn = Connector(MAIN_ADDRESS, None, None, ['test.test'],
                     '_id', None, None)
    # Testing with no file.
    self.assertEqual(conn.read_oplog_progress(), None)
    # Create an empty checkpoint file (portable stdlib replacement for
    # the original ``os.system('touch ...')``).
    open(TEMP_CONFIG, 'w').close()
    conn.oplog_checkpoint = TEMP_CONFIG
    # Testing with empty file.
    self.assertEqual(conn.read_oplog_progress(), None)
    oplog_dict = conn.oplog_progress.get_dict()
    # Add a value to the file, delete the dict, and then read in the value.
    oplog_dict['oplog1'] = Timestamp(12, 34)
    conn.write_oplog_progress()
    del oplog_dict['oplog1']
    self.assertEqual(len(oplog_dict), 0)
    conn.read_oplog_progress()
    self.assertTrue('oplog1' in oplog_dict.keys())
    # BUG FIX: assertTrue(a, b) treats ``b`` as a failure message and
    # performs no comparison at all -- assertEqual does the real check.
    self.assertEqual(oplog_dict['oplog1'], Timestamp(12, 34))
    oplog_dict['oplog1'] = Timestamp(55, 11)
    # See if the oplog progress dict is properly updated.
    conn.read_oplog_progress()
    self.assertEqual(oplog_dict['oplog1'], Timestamp(55, 11))
    # Clean up without shelling out to ``rm``.
    os.remove(TEMP_CONFIG)
def setUp(self):
    """Start a fresh Connector (Solr doc manager) before each test."""
    if not self.flag:
        self.fail(self.err_msg)
    self.connector = Connector(
        ('%s:%s' % (HOSTNAME, PORTS_ONE['MAIN'])),
        CONFIG,
        'http://localhost:8983/solr',
        ['test.test'],
        '_id',
        None,
        'mongo_connector/doc_managers/solr_doc_manager.py')
    self.connector.start()
    while len(self.connector.shard_set) == 0:
        time.sleep(1)
    count = 0
    while True:
        try:
            self.conn['test']['test'].remove(safe=True)
            break
        except (AutoReconnect, OperationFailure):
            time.sleep(1)
            count += 1
            if count > 60:
                # BUG FIX: the SkipTest exception was instantiated but
                # never raised, so the retry loop could spin forever.
                raise unittest.SkipTest('Call to remove failed too '
                                        'many times in setup')
    # Wait until the Solr index is completely empty.
    while len(self.solr_conn.search('*:*')) != 0:
        time.sleep(1)
def setUp(self):
    """Bring up a new connector (Elastic doc manager) for each test case."""
    if not self.flag:
        self.fail("Shards cannot be added to mongos")
    self.connector = Connector(
        '%s:%s' % (HOSTNAME, PORTS_ONE['MONGOS']),
        CONFIG,
        'http://localhost:9200',
        ['test.test'],
        '_id',
        None,
        'mongo_connector/doc_managers/elastic_doc_manager.py')
    self.connector.start()
    # Busy-wait until the shard set is non-empty.
    while len(self.connector.shard_set) < 1:
        pass
    self.conn['test']['test'].remove(safe=True)
    # Poll until Elastic reports zero documents.
    while len(self.elastic_doc._search()) > 0:
        time.sleep(1)
def setUpClass(cls):
    """Initializes the cluster."""
    # Truncate-or-create the config file; one stdlib call replaces the
    # original non-portable ``os.system('rm %s; touch %s')`` round-trip.
    open(CONFIG, 'w').close()
    use_mongos = True
    if PORTS_ONE['MONGOS'] != "27217":
        use_mongos = False
    cls.flag = start_cluster(use_mongos=use_mongos)
    if cls.flag:
        cls.conn = Connection('%s:%s' % (HOSTNAME, PORTS_ONE['MONGOS']),
                              replicaSet="demo-repl")
        # Abort the whole test run if startup hangs for over a minute.
        timer = Timer(60, abort_test)
        cls.connector = Connector(
            "%s:%s" % (HOSTNAME, PORTS_ONE["MONGOS"]),
            CONFIG,
            None,
            ['test.test'],
            '_id',
            None,
            None)
        cls.synchronizer = cls.connector.doc_manager
        timer.start()
        cls.connector.start()
        # Busy-wait until a shard is registered, then disarm the timer.
        while len(cls.connector.shard_set) == 0:
            pass
        timer.cancel()
def setUp(self):
    """Start a fresh Connector (Solr doc manager) before every test."""
    if not self.flag:
        self.fail(self.err_msg)
    self.connector = Connector(
        ('%s:%s' % (HOSTNAME, PORTS_ONE['MAIN'])),
        CONFIG,
        'http://localhost:8983/solr',
        ['test.test'],
        '_id',
        None,
        'mongo_connector/doc_managers/solr_doc_manager.py')
    self.connector.start()
    while len(self.connector.shard_set) == 0:
        time.sleep(1)
    count = 0
    while True:
        try:
            self.conn['test']['test'].remove(safe=True)
            break
        except (AutoReconnect, OperationFailure):
            time.sleep(1)
            count += 1
            if count > 60:
                # BUG FIX: SkipTest was created but never raised, leaving
                # the retry loop to run indefinitely on repeated failures.
                raise unittest.SkipTest('Call to remove failed too '
                                        'many times in setup')
    # Wait for Solr to report an empty index before starting the test.
    while len(self.solr_conn.search('*:*')) != 0:
        time.sleep(1)
class TestSynchronizer(unittest.TestCase):
    """Tests the Solr doc manager against a live Mongo replica set."""

    def runTest(self):
        """Runs tests."""
        unittest.TestCase.__init__(self)

    @classmethod
    def setUpClass(cls):
        """Start the cluster and verify connectivity to Solr."""
        os.system('rm %s; touch %s' % (CONFIG, CONFIG))
        cls.flag = start_cluster()
        if cls.flag:
            cls.conn = Connection('%s:%s' % (HOSTNAME, PORTS_ONE['MAIN']),
                                  replicaSet="demo-repl")
            # Creating a Solr object with an invalid URL
            # doesn't create an exception
            cls.solr_conn = Solr('http://localhost:8983/solr')
            try:
                cls.solr_conn.commit()
            except (SolrError, MissingSchema):
                cls.err_msg = "Cannot connect to Solr!"
                cls.flag = False
            if cls.flag:
                cls.solr_conn.delete(q='*:*')
        else:
            cls.err_msg = "Shards cannot be added to mongos"

    @classmethod
    def tearDownClass(cls):
        """Kills cluster instance."""
        kill_all()

    def setUp(self):
        """Start a fresh Connector and wait for a clean, empty state."""
        if not self.flag:
            self.fail(self.err_msg)
        self.connector = Connector(
            ('%s:%s' % (HOSTNAME, PORTS_ONE['MAIN'])),
            CONFIG,
            'http://localhost:8983/solr',
            ['test.test'],
            '_id',
            None,
            'mongo_connector/doc_managers/solr_doc_manager.py')
        self.connector.start()
        while len(self.connector.shard_set) == 0:
            time.sleep(1)
        count = 0
        while True:
            try:
                self.conn['test']['test'].remove(safe=True)
                break
            except (AutoReconnect, OperationFailure):
                time.sleep(1)
                count += 1
                if count > 60:
                    # BUG FIX: the SkipTest exception was instantiated but
                    # never raised, so this loop could spin forever.
                    raise unittest.SkipTest('Call to remove failed too '
                                            'many times in setup')
        while len(self.solr_conn.search('*:*')) != 0:
            time.sleep(1)

    def tearDown(self):
        """Stop auto-commit and shut the connector down."""
        self.connector.doc_manager.auto_commit = False
        time.sleep(2)
        self.connector.join()

    def test_shard_length(self):
        """Tests the shard_length to see if the shard set was recognized."""
        self.assertEqual(len(self.connector.shard_set), 1)

    def test_initial(self):
        """Tests search and assures that the databases are clear."""
        while True:
            try:
                self.conn['test']['test'].remove(safe=True)
                break
            except OperationFailure:
                continue
        self.solr_conn.delete(q='*:*')
        self.assertEqual(self.conn['test']['test'].find().count(), 0)
        self.assertEqual(len(self.solr_conn.search('*:*')), 0)

    def test_insert(self):
        """Tests insert."""
        self.conn['test']['test'].insert({'name': 'paulie'}, safe=True)
        while len(self.solr_conn.search('*:*')) == 0:
            time.sleep(1)
        result_set_1 = self.solr_conn.search('paulie')
        self.assertEqual(len(result_set_1), 1)
        result_set_2 = self.conn['test']['test'].find_one()
        for item in result_set_1:
            self.assertEqual(item['_id'], str(result_set_2['_id']))
            self.assertEqual(item['name'], result_set_2['name'])

    def test_remove(self):
        """Tests remove."""
        self.conn['test']['test'].remove({'name': 'paulie'}, safe=True)
        while len(self.solr_conn.search('*:*')) == 1:
            time.sleep(1)
        result_set_1 = self.solr_conn.search('paulie')
        self.assertEqual(len(result_set_1), 0)

    def test_rollback(self):
        """Tests rollback.  We force a rollback by inserting one doc,
        killing primary, adding another doc, killing the new primary, and
        restarting both the servers.
        """
        primary_conn = Connection(HOSTNAME, int(PORTS_ONE['PRIMARY']))
        self.conn['test']['test'].insert({'name': 'paul'}, safe=True)
        while self.conn['test']['test'].find({'name': 'paul'}).count() != 1:
            time.sleep(1)
        while len(self.solr_conn.search('*:*')) != 1:
            time.sleep(1)
        kill_mongo_proc(HOSTNAME, PORTS_ONE['PRIMARY'])
        new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))
        admin_db = new_primary_conn['admin']
        # Wait for the secondary to be promoted to primary.
        while admin_db.command("isMaster")['ismaster'] is False:
            time.sleep(1)
        time.sleep(5)
        count = 0
        while True:
            try:
                self.conn['test']['test'].insert(
                    {'name': 'pauline'}, safe=True)
                break
            except OperationFailure:
                count += 1
                if count > 60:
                    self.fail('Call to insert failed too '
                              'many times in test_rollback')
                time.sleep(1)
                continue
        while len(self.solr_conn.search('*:*')) != 2:
            time.sleep(1)
        result_set_1 = self.solr_conn.search('pauline')
        result_set_2 = self.conn['test']['test'].find_one({'name': 'pauline'})
        self.assertEqual(len(result_set_1), 1)
        for item in result_set_1:
            self.assertEqual(item['_id'], str(result_set_2['_id']))
        kill_mongo_proc(HOSTNAME, PORTS_ONE['SECONDARY'])
        start_mongo_proc(PORTS_ONE['PRIMARY'], "demo-repl", "/replset1a",
                         "/replset1a.log", None)
        while primary_conn['admin'].command("isMaster")['ismaster'] is False:
            time.sleep(1)
        start_mongo_proc(PORTS_ONE['SECONDARY'], "demo-repl", "/replset1b",
                         "/replset1b.log", None)
        time.sleep(2)
        # After the rollback 'pauline' must be gone and 'paul' restored.
        result_set_1 = self.solr_conn.search('pauline')
        self.assertEqual(len(result_set_1), 0)
        result_set_2 = self.solr_conn.search('paul')
        self.assertEqual(len(result_set_2), 1)

    def test_stress(self):
        """Test stress by inserting and removing a large amount of docs."""
        # stress test
        for i in range(0, NUMBER_OF_DOC_DIRS):
            self.conn['test']['test'].insert({'name': 'Paul ' + str(i)})
        time.sleep(5)
        while (len(self.solr_conn.search('*:*', rows=NUMBER_OF_DOC_DIRS))
                != NUMBER_OF_DOC_DIRS):
            time.sleep(5)
        for i in range(0, NUMBER_OF_DOC_DIRS):
            result_set_1 = self.solr_conn.search('Paul ' + str(i))
            for item in result_set_1:
                # NOTE(review): this compares item['_id'] to itself, so it
                # can never fail -- it presumably should compare against
                # the corresponding Mongo document's _id.  TODO confirm.
                self.assertEqual(item['_id'], item['_id'])

    def test_stressed_rollback(self):
        """Test stressed rollback with number of documents equal to specified
        in global variable.  The rollback is performed the same way as
        before but with more docs.
        """
        self.conn['test']['test'].remove()
        while len(self.solr_conn.search('*:*', rows=NUMBER_OF_DOC_DIRS)) != 0:
            time.sleep(1)
        for i in range(0, NUMBER_OF_DOC_DIRS):
            self.conn['test']['test'].insert(
                {'name': 'Paul ' + str(i)}, safe=True)
        while (len(self.solr_conn.search('*:*', rows=NUMBER_OF_DOC_DIRS))
                != NUMBER_OF_DOC_DIRS):
            time.sleep(1)
        primary_conn = Connection(HOSTNAME, int(PORTS_ONE['PRIMARY']))
        kill_mongo_proc(HOSTNAME, PORTS_ONE['PRIMARY'])
        new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))
        admin_db = new_primary_conn['admin']
        while admin_db.command("isMaster")['ismaster'] is False:
            time.sleep(1)
        time.sleep(5)
        count = -1
        while count + 1 < NUMBER_OF_DOC_DIRS:
            try:
                count += 1
                self.conn['test']['test'].insert(
                    {'name': 'Pauline ' + str(count)}, safe=True)
            except (OperationFailure, AutoReconnect):
                time.sleep(1)
        while (len(self.solr_conn.search('*:*', rows=NUMBER_OF_DOC_DIRS * 2))
                != self.conn['test']['test'].find().count()):
            time.sleep(1)
        result_set_1 = self.solr_conn.search(
            'Pauline', rows=NUMBER_OF_DOC_DIRS * 2, sort='_id asc')
        for item in result_set_1:
            result_set_2 = self.conn['test']['test'].find_one(
                {'name': item['name']})
            self.assertEqual(item['_id'], str(result_set_2['_id']))
        kill_mongo_proc(HOSTNAME, PORTS_ONE['SECONDARY'])
        start_mongo_proc(PORTS_ONE['PRIMARY'], "demo-repl", "/replset1a",
                         "/replset1a.log", None)
        while primary_conn['admin'].command("isMaster")['ismaster'] is False:
            time.sleep(1)
        start_mongo_proc(PORTS_ONE['SECONDARY'], "demo-repl", "/replset1b",
                         "/replset1b.log", None)
        # Wait for the rolled-back 'Pauline' docs to disappear from Solr.
        while (len(self.solr_conn.search(
                'Pauline', rows=NUMBER_OF_DOC_DIRS * 2)) != 0):
            time.sleep(15)
        result_set_1 = self.solr_conn.search(
            'Pauline', rows=NUMBER_OF_DOC_DIRS * 2)
        self.assertEqual(len(result_set_1), 0)
        result_set_2 = self.solr_conn.search(
            'Paul', rows=NUMBER_OF_DOC_DIRS * 2)
        self.assertEqual(len(result_set_2), NUMBER_OF_DOC_DIRS)

    def test_valid_fields(self):
        """Tests documents with field definitions."""
        inserted_obj = self.conn['test']['test'].insert(
            {'name': 'test_valid'})
        self.conn['test']['test'].update(
            {'_id': inserted_obj},
            {'$set': {'popularity': 1}})
        for _ in range(60):
            if len(self.connector.doc_manager._search("*:*")) != 0:
                break
            time.sleep(1)
        else:
            # NOTE(review): the message is misleading -- this loop waits
            # for docs to *appear* in Solr, not to be removed.
            self.fail("Timeout when removing docs from Solr")
        result = self.connector.doc_manager.get_last_doc()
        self.assertIn('popularity', result)
        self.assertEqual(
            len(self.connector.doc_manager._search("name=test_valid")), 1)

    def test_invalid_fields(self):
        """Tests documents without field definitions."""
        inserted_obj = self.conn['test']['test'].insert(
            {'name': 'test_invalid'})
        self.conn['test']['test'].update(
            {'_id': inserted_obj},
            {'$set': {'break_this_test': 1}})
        for _ in range(60):
            if len(self.connector.doc_manager._search("*:*")) != 0:
                break
            time.sleep(1)
        else:
            # NOTE(review): misleading message -- see test_valid_fields.
            self.fail("Timeout when removing docs from Solr")
        result = self.connector.doc_manager.get_last_doc()
        self.assertNotIn('break_this_test', result)
        self.assertEqual(
            len(self.connector.doc_manager._search("name=test_invalid")), 1)

    def test_dynamic_fields(self):
        """Tests dynamic field definitions.

        The following field in the supplied schema.xml:
        <dynamicField name="*_i" type="int" indexed="true" stored="true"/>
        <dynamicField name="i_*" type="int" indexed="true" stored="true"/>
        """
        # The inserted ObjectId is not needed afterwards (unused local
        # removed relative to the original).
        self.conn['test']['test'].insert({
            'name': 'test_dynamic',
            'foo_i': 1,
            'i_foo': 1
        })
        self.assertEqual(self.conn['test']['test'].find().count(), 1)
        for _ in range(60):
            if len(self.connector.doc_manager._search("*:*")) != 0:
                break
            time.sleep(1)
        else:
            # NOTE(review): misleading message -- see test_valid_fields.
            self.fail("Timeout when removing docs from Solr")
        result = self.connector.doc_manager.get_last_doc()
        self.assertIn('i_foo', result)
        self.assertIn('foo_i', result)
        self.assertEqual(
            len(self.connector.doc_manager._search("i_foo:1")), 1)
        self.assertEqual(
            len(self.connector.doc_manager._search("foo_i:1")), 1)
class TestElastic(unittest.TestCase):
    """Tests the Elastic instance."""

    def runTest(self):
        """Runs the tests."""
        unittest.TestCase.__init__(self)

    @classmethod
    def setUpClass(cls):
        """Starts the cluster."""
        os.system('rm %s; touch %s' % (CONFIG, CONFIG))
        cls.elastic_doc = DocManager('http://localhost:9200',
                                     auto_commit=False)
        cls.elastic_doc._remove()
        cls.flag = start_cluster()
        if cls.flag:
            cls.conn = Connection('%s:%s' % (HOSTNAME, PORTS_ONE['MONGOS']),
                                  replicaSet="demo-repl")
        import logging
        logger = logging.getLogger()
        loglevel = logging.INFO
        logger.setLevel(loglevel)

    @classmethod
    def tearDownClass(cls):
        """Kills cluster instance."""
        kill_all()

    def tearDown(self):
        """Ends the connector."""
        self.connector.doc_manager.auto_commit = False
        time.sleep(2)
        self.connector.join()

    def setUp(self):
        """Starts a new connector for every test."""
        if not self.flag:
            self.fail("Shards cannot be added to mongos")
        self.connector = Connector(
            '%s:%s' % (HOSTNAME, PORTS_ONE['MONGOS']),
            CONFIG,
            'http://localhost:9200',
            ['test.test'],
            '_id',
            None,
            'mongo_connector/doc_managers/elastic_doc_manager.py')
        self.connector.start()
        while len(self.connector.shard_set) == 0:
            pass
        self.conn['test']['test'].remove(safe=True)
        while len(self.elastic_doc._search()) != 0:
            time.sleep(1)

    def test_shard_length(self):
        """Tests the shard_length to see if the shard set was recognized
        properly.
        """
        self.assertEqual(len(self.connector.shard_set), 1)

    def test_initial(self):
        """Tests search and assures that the databases are clear."""
        self.conn['test']['test'].remove(safe=True)
        self.assertEqual(self.conn['test']['test'].find().count(), 0)
        self.assertEqual(len(self.elastic_doc._search()), 0)

    def test_insert(self):
        """Tests insert."""
        self.conn['test']['test'].insert({'name': 'paulie'}, safe=True)
        while len(self.elastic_doc._search()) == 0:
            time.sleep(1)
        result_set_1 = self.elastic_doc._search()
        self.assertEqual(len(result_set_1), 1)
        result_set_2 = self.conn['test']['test'].find_one()
        for item in result_set_1:
            self.assertEqual(item['_id'], str(result_set_2['_id']))
            self.assertEqual(item['name'], result_set_2['name'])

    def test_remove(self):
        """Tests remove."""
        self.conn['test']['test'].insert({'name': 'paulie'}, safe=True)
        while len(self.elastic_doc._search()) != 1:
            time.sleep(1)
        self.conn['test']['test'].remove({'name': 'paulie'}, safe=True)
        while len(self.elastic_doc._search()) == 1:
            time.sleep(1)
        result_set_1 = self.elastic_doc._search()
        self.assertEqual(len(result_set_1), 0)

    def test_rollback(self):
        """Tests rollback.  We force a rollback by adding a doc, killing
        the primary, adding another doc, killing the new primary, and then
        restarting both.
        """
        primary_conn = Connection(HOSTNAME, int(PORTS_ONE['PRIMARY']))
        self.conn['test']['test'].insert({'name': 'paul'}, safe=True)
        while self.conn['test']['test'].find({'name': 'paul'}).count() != 1:
            time.sleep(1)
        while len(self.elastic_doc._search()) != 1:
            time.sleep(1)
        kill_mongo_proc(HOSTNAME, PORTS_ONE['PRIMARY'])
        new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))
        admin = new_primary_conn['admin']
        while admin.command("isMaster")['ismaster'] is False:
            time.sleep(1)
        time.sleep(5)
        count = 0
        while True:
            try:
                self.conn['test']['test'].insert(
                    {'name': 'pauline'}, safe=True)
                break
            except OperationFailure:
                time.sleep(1)
                count += 1
                if count >= 60:
                    # BUG FIX: the original called sys.exit(1) here, which
                    # kills the entire test process instead of failing
                    # just this test.
                    self.fail('Call to insert failed too '
                              'many times in test_rollback')
                continue
        while len(self.elastic_doc._search()) != 2:
            time.sleep(1)
        result_set_1 = self.elastic_doc._search()
        result_set_2 = self.conn['test']['test'].find_one({'name': 'pauline'})
        self.assertEqual(len(result_set_1), 2)
        # make sure pauline is there
        for item in result_set_1:
            if item['name'] == 'pauline':
                self.assertEqual(item['_id'], str(result_set_2['_id']))
        kill_mongo_proc(HOSTNAME, PORTS_ONE['SECONDARY'])
        start_mongo_proc(PORTS_ONE['PRIMARY'], "demo-repl", "/replset1a",
                         "/replset1a.log", None)
        while primary_conn['admin'].command("isMaster")['ismaster'] is False:
            time.sleep(1)
        start_mongo_proc(PORTS_ONE['SECONDARY'], "demo-repl", "/replset1b",
                         "/replset1b.log", None)
        time.sleep(2)
        result_set_1 = self.elastic_doc._search()
        self.assertEqual(len(result_set_1), 1)
        for item in result_set_1:
            self.assertEqual(item['name'], 'paul')
        find_cursor = retry_until_ok(self.conn['test']['test'].find)
        self.assertEqual(retry_until_ok(find_cursor.count), 1)

    def test_stress(self):
        """Test stress by inserting and removing the number of documents
        specified in global variable.
        """
        for i in range(0, NUMBER_OF_DOC_DIRS):
            self.conn['test']['test'].insert({'name': 'Paul ' + str(i)})
        time.sleep(5)
        while len(self.elastic_doc._search()) != NUMBER_OF_DOC_DIRS:
            time.sleep(5)
        for i in range(0, NUMBER_OF_DOC_DIRS):
            result_set_1 = self.elastic_doc._search()
            for item in result_set_1:
                # BUG FIX: the original compared against 'Paul' + str(i)
                # (no space) while the inserted names are 'Paul ' + str(i),
                # so this branch never executed.
                if item['name'] == 'Paul ' + str(i):
                    # NOTE(review): comparing item['_id'] to itself can
                    # never fail -- likely meant to compare against the
                    # Mongo document's _id.  TODO confirm.
                    self.assertEqual(item['_id'], item['_id'])

    def test_stressed_rollback(self):
        """Test stressed rollback with number of documents equal to
        specified in global variable.  Strategy for rollback is the
        same as before.
        """
        while len(self.elastic_doc._search()) != 0:
            time.sleep(1)
        for i in range(0, NUMBER_OF_DOC_DIRS):
            self.conn['test']['test'].insert(
                {'name': 'Paul ' + str(i)}, safe=True)
        while len(self.elastic_doc._search()) != NUMBER_OF_DOC_DIRS:
            time.sleep(1)
        primary_conn = Connection(HOSTNAME, int(PORTS_ONE['PRIMARY']))
        kill_mongo_proc(HOSTNAME, PORTS_ONE['PRIMARY'])
        new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))
        admin = new_primary_conn['admin']
        while admin.command("isMaster")['ismaster'] is False:
            time.sleep(1)
        time.sleep(5)
        count = -1
        while count + 1 < NUMBER_OF_DOC_DIRS:
            try:
                count += 1
                self.conn['test']['test'].insert(
                    {'name': 'Pauline ' + str(count)}, safe=True)
            except (OperationFailure, AutoReconnect):
                time.sleep(1)
        while (len(self.elastic_doc._search())
                != self.conn['test']['test'].find().count()):
            time.sleep(1)
        result_set_1 = self.elastic_doc._search()
        for item in result_set_1:
            if 'Pauline' in item['name']:
                result_set_2 = self.conn['test']['test'].find_one(
                    {'name': item['name']})
                self.assertEqual(item['_id'], str(result_set_2['_id']))
        kill_mongo_proc(HOSTNAME, PORTS_ONE['SECONDARY'])
        start_mongo_proc(PORTS_ONE['PRIMARY'], "demo-repl", "/replset1a",
                         "/replset1a.log", None)
        while primary_conn['admin'].command("isMaster")['ismaster'] is False:
            time.sleep(1)
        time.sleep(1)
        start_mongo_proc(PORTS_ONE['SECONDARY'], "demo-repl", "/replset1b",
                         "/replset1b.log", None)
        while len(self.elastic_doc._search()) != NUMBER_OF_DOC_DIRS:
            time.sleep(5)
        result_set_1 = self.elastic_doc._search()
        self.assertEqual(len(result_set_1), NUMBER_OF_DOC_DIRS)
        for item in result_set_1:
            self.assertTrue('Paul' in item['name'])
        find_cursor = retry_until_ok(self.conn['test']['test'].find)
        self.assertEqual(retry_until_ok(find_cursor.count),
                         NUMBER_OF_DOC_DIRS)

    def test_non_standard_fields(self):
        """Tests ObjectIds, DBrefs, etc."""
        # This test can break if it attempts to insert before the dump takes
        # place- this prevents it (other tests affected too actually)
        while self.connector.shard_set['demo-repl'].checkpoint is None:
            time.sleep(1)
        docs = [
            {'foo': [1, 2]},
            {'bar': {'hello': 'world'}},
            {'code': Code("function x() { return 1; }")},
            {'dbref': {'_ref': DBRef('simple',
                                     ObjectId('509b8db456c02c5ab7e63c34'))}}
        ]
        try:
            self.conn['test']['test'].insert(docs)
        except OperationFailure:
            self.fail("Cannot insert documents into Elastic!")
        for _ in range(1, 60):
            if len(self.elastic_doc._search()) == len(docs):
                break
            time.sleep(1)
        else:
            self.fail("Did not get all expected documents")
        self.assertIn("dbref", self.elastic_doc.get_last_doc())
class TestSynchronizer(unittest.TestCase):
    """Tests the mongo instance (mongo-to-mongo doc manager)."""

    def runTest(self):
        """Runs the tests."""
        unittest.TestCase.__init__(self)

    @classmethod
    def setUpClass(cls):
        """Start the target mongod, the doc manager, and the cluster."""
        os.system('rm %s; touch %s' % (CONFIG, CONFIG))
        start_single_mongod_instance("30000", "/MC", "MC_log")
        cls.mongo_doc = DocManager("%s:30000" % (HOSTNAME))
        cls.mongo_doc._remove()
        cls.flag = start_cluster()
        if cls.flag:
            cls.conn = Connection("%s:%s" % (HOSTNAME, PORTS_ONE['MONGOS']),
                                  replicaSet="demo-repl")

    @classmethod
    def tearDownClass(cls):
        """Kills cluster instance."""
        kill_mongo_proc(HOSTNAME, 30000)
        kill_all()

    def tearDown(self):
        """Shut the connector down after each test."""
        self.connector.join()

    def setUp(self):
        """Start a fresh connector and wait for an empty target."""
        if not self.flag:
            self.fail("Shards cannot be added to mongos")
        self.connector = Connector(
            "%s:%s" % (HOSTNAME, PORTS_ONE["MONGOS"]),
            CONFIG,
            '%s:30000' % (HOSTNAME),
            ['test.test'],
            '_id',
            None,
            'mongo_connector/doc_managers/mongo_doc_manager.py')
        self.connector.start()
        while len(self.connector.shard_set) == 0:
            pass
        self.conn['test']['test'].remove(safe=True)
        while len(self.mongo_doc._search()) != 0:
            time.sleep(1)

    def test_shard_length(self):
        """Tests the shard_length to see if the shard set was recognized
        properly.
        """
        self.assertEqual(len(self.connector.shard_set), 1)

    def test_initial(self):
        """Tests search and assures that the databases are clear."""
        self.conn['test']['test'].remove(safe=True)
        self.assertEqual(self.conn['test']['test'].find().count(), 0)
        self.assertEqual(len(self.mongo_doc._search()), 0)

    def test_insert(self):
        """Tests insert."""
        self.conn['test']['test'].insert({'name': 'paulie'}, safe=True)
        while len(self.mongo_doc._search()) == 0:
            time.sleep(1)
        result_set_1 = self.mongo_doc._search()
        self.assertEqual(len(result_set_1), 1)
        result_set_2 = self.conn['test']['test'].find_one()
        for item in result_set_1:
            self.assertEqual(item['_id'], result_set_2['_id'])
            self.assertEqual(item['name'], result_set_2['name'])

    def test_remove(self):
        """Tests remove."""
        self.conn['test']['test'].insert({'name': 'paulie'}, safe=True)
        while len(self.mongo_doc._search()) != 1:
            time.sleep(1)
        self.conn['test']['test'].remove({'name': 'paulie'}, safe=True)
        while len(self.mongo_doc._search()) == 1:
            time.sleep(1)
        result_set_1 = self.mongo_doc._search()
        self.assertEqual(len(result_set_1), 0)

    def test_rollback(self):
        """Tests rollback.  We force a rollback by adding a doc, killing
        the primary, adding another doc, killing the new primary, and then
        restarting both.
        """
        primary_conn = Connection(HOSTNAME, int(PORTS_ONE['PRIMARY']))
        self.conn['test']['test'].insert({'name': 'paul'}, safe=True)
        while self.conn['test']['test'].find({'name': 'paul'}).count() != 1:
            time.sleep(1)
        while len(self.mongo_doc._search()) != 1:
            time.sleep(1)
        kill_mongo_proc(HOSTNAME, PORTS_ONE['PRIMARY'])
        new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))
        admin = new_primary_conn['admin']
        while admin.command("isMaster")['ismaster'] is False:
            time.sleep(1)
        time.sleep(5)
        count = 0
        while True:
            try:
                # Unused assignment of the insert result removed.
                self.conn['test']['test'].insert(
                    {'name': 'pauline'}, safe=True)
                break
            except OperationFailure:
                time.sleep(1)
                count += 1
                if count >= 60:
                    # BUG FIX: the original called sys.exit(1), which
                    # aborts the entire test run instead of failing
                    # just this test.
                    self.fail('Call to insert failed too '
                              'many times in test_rollback')
                continue
        while len(self.mongo_doc._search()) != 2:
            time.sleep(1)
        result_set_1 = self.mongo_doc._search()
        result_set_2 = self.conn['test']['test'].find_one({'name': 'pauline'})
        # BUG FIX: the original asserted len(result_set_2) == 2, which only
        # counted the keys of the find_one() dict; the intent (matching the
        # Elastic sibling test) is to check the search result count.
        self.assertEqual(len(result_set_1), 2)
        # make sure pauline is there
        for item in result_set_1:
            if item['name'] == 'pauline':
                self.assertEqual(item['_id'], result_set_2['_id'])
        kill_mongo_proc(HOSTNAME, PORTS_ONE['SECONDARY'])
        start_mongo_proc(PORTS_ONE['PRIMARY'], "demo-repl", "/replset1a",
                         "/replset1a.log", None)
        while primary_conn['admin'].command("isMaster")['ismaster'] is False:
            time.sleep(1)
        start_mongo_proc(PORTS_ONE['SECONDARY'], "demo-repl", "/replset1b",
                         "/replset1b.log", None)
        time.sleep(2)
        result_set_1 = self.mongo_doc._search()
        self.assertEqual(len(result_set_1), 1)
        for item in result_set_1:
            self.assertEqual(item['name'], 'paul')
        find_cursor = retry_until_ok(self.conn['test']['test'].find)
        self.assertEqual(retry_until_ok(find_cursor.count), 1)

    def test_stress(self):
        """Test stress by inserting and removing the number of documents
        specified in global variable.
        """
        for i in range(0, NUMBER_OF_DOC_DIRS):
            self.conn['test']['test'].insert({'name': 'Paul ' + str(i)})
        time.sleep(5)
        while len(self.mongo_doc._search()) != NUMBER_OF_DOC_DIRS:
            time.sleep(5)
        for i in range(0, NUMBER_OF_DOC_DIRS):
            result_set_1 = self.mongo_doc._search()
            for item in result_set_1:
                # BUG FIX: the original compared against 'Paul' + str(i)
                # (no space) while the inserted names contain a space, so
                # this branch never executed.
                if item['name'] == 'Paul ' + str(i):
                    # NOTE(review): comparing item['_id'] to itself can
                    # never fail -- likely meant to compare against the
                    # source document's _id.  TODO confirm.
                    self.assertEqual(item['_id'], item['_id'])

    def test_stressed_rollback(self):
        """Test stressed rollback with number of documents equal to
        specified in global variable.  Strategy for rollback is the
        same as before.
        """
        while len(self.mongo_doc._search()) != 0:
            time.sleep(1)
        for i in range(0, NUMBER_OF_DOC_DIRS):
            self.conn['test']['test'].insert(
                {'name': 'Paul ' + str(i)}, safe=True)
        while len(self.mongo_doc._search()) != NUMBER_OF_DOC_DIRS:
            time.sleep(1)
        primary_conn = Connection(HOSTNAME, int(PORTS_ONE['PRIMARY']))
        kill_mongo_proc(HOSTNAME, PORTS_ONE['PRIMARY'])
        new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))
        admin = new_primary_conn['admin']
        while admin.command("isMaster")['ismaster'] is False:
            time.sleep(1)
        time.sleep(5)
        count = -1
        while count + 1 < NUMBER_OF_DOC_DIRS:
            try:
                count += 1
                self.conn['test']['test'].insert(
                    {'name': 'Pauline ' + str(count)}, safe=True)
            except (OperationFailure, AutoReconnect):
                time.sleep(1)
        while (len(self.mongo_doc._search())
                != self.conn['test']['test'].find().count()):
            time.sleep(1)
        result_set_1 = self.mongo_doc._search()
        for item in result_set_1:
            if 'Pauline' in item['name']:
                result_set_2 = self.conn['test']['test'].find_one(
                    {'name': item['name']})
                self.assertEqual(item['_id'], result_set_2['_id'])
        kill_mongo_proc(HOSTNAME, PORTS_ONE['SECONDARY'])
        start_mongo_proc(PORTS_ONE['PRIMARY'], "demo-repl", "/replset1a",
                         "/replset1a.log", None)
        while primary_conn['admin'].command("isMaster")['ismaster'] is False:
            time.sleep(1)
        time.sleep(1)
        start_mongo_proc(PORTS_ONE['SECONDARY'], "demo-repl", "/replset1b",
                         "/replset1b.log", None)
        while len(self.mongo_doc._search()) != NUMBER_OF_DOC_DIRS:
            time.sleep(5)
        result_set_1 = self.mongo_doc._search()
        self.assertEqual(len(result_set_1), NUMBER_OF_DOC_DIRS)
        for item in result_set_1:
            self.assertTrue('Paul' in item['name'])
        find_cursor = retry_until_ok(self.conn['test']['test'].find)
        self.assertEqual(retry_until_ok(find_cursor.count),
                         NUMBER_OF_DOC_DIRS)
class TestSynchronizer(unittest.TestCase):
    """Integration tests replicating from MongoDB to Solr.

    Starts a mongo cluster and a Connector wired to the Solr doc manager,
    then verifies that inserts, removes, rollbacks, stress loads, and the
    schema's static/dynamic field handling are mirrored into Solr.
    """

    def runTest(self):
        """ Runs tests
        """
        unittest.TestCase.__init__(self)

    @classmethod
    def setUpClass(cls):
        """Start the mongo cluster and check that Solr is reachable."""
        os.system('rm %s; touch %s' % (CONFIG, CONFIG))
        cls.flag = start_cluster()
        if cls.flag:
            cls.conn = Connection('%s:%s' % (HOSTNAME, PORTS_ONE['MAIN']),
                                  replicaSet="demo-repl")
            # Creating a Solr object with an invalid URL
            # doesn't create an exception
            cls.solr_conn = Solr('http://localhost:8983/solr')
            try:
                # commit() is the cheapest round-trip that proves Solr is up
                cls.solr_conn.commit()
            except (SolrError, MissingSchema):
                cls.err_msg = "Cannot connect to Solr!"
                cls.flag = False
            if cls.flag:
                cls.solr_conn.delete(q='*:*')
        else:
            cls.err_msg = "Shards cannot be added to mongos"

    @classmethod
    def tearDownClass(cls):
        """ Kills cluster instance
        """
        kill_all()

    def setUp(self):
        """Start a fresh Connector for each test and wait until the shard
        set is detected and both MongoDB and Solr are empty.
        """
        if not self.flag:
            self.fail(self.err_msg)

        self.connector = Connector(
            ('%s:%s' % (HOSTNAME, PORTS_ONE['MAIN'])),
            CONFIG, 'http://localhost:8983/solr', ['test.test'], '_id',
            None, 'mongo_connector/doc_managers/solr_doc_manager.py')
        self.connector.start()
        while len(self.connector.shard_set) == 0:
            time.sleep(1)
        count = 0
        while True:
            try:
                self.conn['test']['test'].remove(safe=True)
                break
            except (AutoReconnect, OperationFailure):
                time.sleep(1)
                count += 1
                if count > 60:
                    # BUG FIX: SkipTest was instantiated but never raised,
                    # so this loop could retry forever; raise it so the
                    # test is actually skipped after 60 attempts.
                    raise unittest.SkipTest('Call to remove failed too '
                                            'many times in setup')
        while len(self.solr_conn.search('*:*')) != 0:
            time.sleep(1)

    def tearDown(self):
        """Stop auto-commit and shut the connector down."""
        self.connector.doc_manager.auto_commit = False
        time.sleep(2)
        self.connector.join()

    def test_shard_length(self):
        """Tests the shard_length to see if the shard set was recognized
        """
        self.assertEqual(len(self.connector.shard_set), 1)

    def test_initial(self):
        """Tests search and assures that the databases are clear.
        """
        while True:
            try:
                self.conn['test']['test'].remove(safe=True)
                break
            except OperationFailure:
                continue
        self.solr_conn.delete(q='*:*')
        self.assertEqual(self.conn['test']['test'].find().count(), 0)
        self.assertEqual(len(self.solr_conn.search('*:*')), 0)

    def test_insert(self):
        """Tests insert
        """
        self.conn['test']['test'].insert({'name': 'paulie'}, safe=True)
        while len(self.solr_conn.search('*:*')) == 0:
            time.sleep(1)
        result_set_1 = self.solr_conn.search('paulie')
        self.assertEqual(len(result_set_1), 1)
        result_set_2 = self.conn['test']['test'].find_one()
        for item in result_set_1:
            # Solr stores _id as a string, so compare against str()
            self.assertEqual(item['_id'], str(result_set_2['_id']))
            self.assertEqual(item['name'], result_set_2['name'])

    def test_remove(self):
        """Tests remove
        """
        self.conn['test']['test'].remove({'name': 'paulie'}, safe=True)
        while len(self.solr_conn.search('*:*')) == 1:
            time.sleep(1)
        result_set_1 = self.solr_conn.search('paulie')
        self.assertEqual(len(result_set_1), 0)

    def test_rollback(self):
        """Tests rollback. We force a rollback by inserting one doc, killing
        primary, adding another doc, killing the new primary, and
        restarting both the servers.
        """
        primary_conn = Connection(HOSTNAME, int(PORTS_ONE['PRIMARY']))

        self.conn['test']['test'].insert({'name': 'paul'}, safe=True)
        while self.conn['test']['test'].find({'name': 'paul'}).count() != 1:
            time.sleep(1)
        while len(self.solr_conn.search('*:*')) != 1:
            time.sleep(1)
        kill_mongo_proc(HOSTNAME, PORTS_ONE['PRIMARY'])

        new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))
        admin_db = new_primary_conn['admin']
        # wait for the secondary to be elected primary
        while admin_db.command("isMaster")['ismaster'] is False:
            time.sleep(1)
        time.sleep(5)
        count = 0
        while True:
            try:
                self.conn['test']['test'].insert(
                    {'name': 'pauline'}, safe=True)
                break
            except OperationFailure:
                count += 1
                if count > 60:
                    self.fail('Call to insert failed too '
                              'many times in test_rollback')
                time.sleep(1)
                continue
        while len(self.solr_conn.search('*:*')) != 2:
            time.sleep(1)
        result_set_1 = self.solr_conn.search('pauline')
        result_set_2 = self.conn['test']['test'].find_one({'name': 'pauline'})
        self.assertEqual(len(result_set_1), 1)
        for item in result_set_1:
            self.assertEqual(item['_id'], str(result_set_2['_id']))
        kill_mongo_proc(HOSTNAME, PORTS_ONE['SECONDARY'])

        # restart the original primary; 'pauline' should be rolled back
        start_mongo_proc(PORTS_ONE['PRIMARY'], "demo-repl", "/replset1a",
                         "/replset1a.log", None)
        while primary_conn['admin'].command("isMaster")['ismaster'] is False:
            time.sleep(1)

        start_mongo_proc(PORTS_ONE['SECONDARY'], "demo-repl", "/replset1b",
                         "/replset1b.log", None)

        time.sleep(2)
        result_set_1 = self.solr_conn.search('pauline')
        self.assertEqual(len(result_set_1), 0)
        result_set_2 = self.solr_conn.search('paul')
        self.assertEqual(len(result_set_2), 1)

    def test_stress(self):
        """Test stress by inserting and removing a large amount of docs.
        """
        # stress test
        for i in range(0, NUMBER_OF_DOC_DIRS):
            self.conn['test']['test'].insert({'name': 'Paul ' + str(i)})
        time.sleep(5)
        while (len(self.solr_conn.search('*:*', rows=NUMBER_OF_DOC_DIRS))
                != NUMBER_OF_DOC_DIRS):
            time.sleep(5)
        for i in range(0, NUMBER_OF_DOC_DIRS):
            result_set_1 = self.solr_conn.search('Paul ' + str(i))
            for item in result_set_1:
                self.assertEqual(item['_id'], item['_id'])

    def test_stressed_rollback(self):
        """Test stressed rollback with number of documents equal to specified
        in global variable. The rollback is performed the same way as before
        but with more docs
        """
        self.conn['test']['test'].remove()
        while len(self.solr_conn.search('*:*',
                                        rows=NUMBER_OF_DOC_DIRS)) != 0:
            time.sleep(1)
        for i in range(0, NUMBER_OF_DOC_DIRS):
            self.conn['test']['test'].insert(
                {'name': 'Paul ' + str(i)}, safe=True)

        while (len(self.solr_conn.search('*:*', rows=NUMBER_OF_DOC_DIRS))
                != NUMBER_OF_DOC_DIRS):
            time.sleep(1)
        primary_conn = Connection(HOSTNAME, int(PORTS_ONE['PRIMARY']))
        kill_mongo_proc(HOSTNAME, PORTS_ONE['PRIMARY'])

        new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))
        admin_db = new_primary_conn['admin']
        while admin_db.command("isMaster")['ismaster'] is False:
            time.sleep(1)
        time.sleep(5)
        count = -1
        while count + 1 < NUMBER_OF_DOC_DIRS:
            try:
                count += 1
                self.conn['test']['test'].insert(
                    {'name': 'Pauline ' + str(count)}, safe=True)
            except (OperationFailure, AutoReconnect):
                time.sleep(1)
        while (len(self.solr_conn.search('*:*',
                                         rows=NUMBER_OF_DOC_DIRS * 2))
                != self.conn['test']['test'].find().count()):
            time.sleep(1)
        result_set_1 = self.solr_conn.search(
            'Pauline', rows=NUMBER_OF_DOC_DIRS * 2, sort='_id asc')
        for item in result_set_1:
            result_set_2 = self.conn['test']['test'].find_one(
                {'name': item['name']})
            self.assertEqual(item['_id'], str(result_set_2['_id']))
        kill_mongo_proc(HOSTNAME, PORTS_ONE['SECONDARY'])

        start_mongo_proc(PORTS_ONE['PRIMARY'], "demo-repl", "/replset1a",
                         "/replset1a.log", None)
        while primary_conn['admin'].command("isMaster")['ismaster'] is False:
            time.sleep(1)

        start_mongo_proc(PORTS_ONE['SECONDARY'], "demo-repl", "/replset1b",
                         "/replset1b.log", None)

        # all 'Pauline' docs should disappear after the rollback
        while (len(self.solr_conn.search('Pauline',
                                         rows=NUMBER_OF_DOC_DIRS * 2)) != 0):
            time.sleep(15)
        result_set_1 = self.solr_conn.search(
            'Pauline', rows=NUMBER_OF_DOC_DIRS * 2)
        self.assertEqual(len(result_set_1), 0)
        result_set_2 = self.solr_conn.search(
            'Paul', rows=NUMBER_OF_DOC_DIRS * 2)
        self.assertEqual(len(result_set_2), NUMBER_OF_DOC_DIRS)

    def test_valid_fields(self):
        """ Tests documents with field definitions
        """
        inserted_obj = self.conn['test']['test'].insert(
            {'name': 'test_valid'})
        self.conn['test']['test'].update({'_id': inserted_obj},
                                         {'$set': {'popularity': 1}})
        for _ in range(60):
            if len(self.connector.doc_manager._search("*:*")) != 0:
                break
            time.sleep(1)
        else:
            # BUG FIX: message said "removing"; this loop waits for docs
            # to APPEAR in Solr.
            self.fail("Timeout when waiting for docs to appear in Solr")
        result = self.connector.doc_manager.get_last_doc()
        self.assertIn('popularity', result)
        self.assertEqual(len(self.connector.doc_manager._search(
            "name=test_valid")), 1)

    def test_invalid_fields(self):
        """ Tests documents without field definitions
        """
        inserted_obj = self.conn['test']['test'].insert(
            {'name': 'test_invalid'})
        self.conn['test']['test'].update({'_id': inserted_obj},
                                         {'$set': {'break_this_test': 1}})
        for _ in range(60):
            if len(self.connector.doc_manager._search("*:*")) != 0:
                break
            time.sleep(1)
        else:
            # BUG FIX: message said "removing"; this loop waits for docs
            # to APPEAR in Solr.
            self.fail("Timeout when waiting for docs to appear in Solr")
        result = self.connector.doc_manager.get_last_doc()
        # fields absent from the schema must be dropped, not indexed
        self.assertNotIn('break_this_test', result)
        self.assertEqual(len(self.connector.doc_manager._search(
            "name=test_invalid")), 1)

    def test_dynamic_fields(self):
        """ Tests dynamic field definitions

        The following field in the supplied schema.xml:
        <dynamicField name="*_i" type="int" indexed="true" stored="true"/>
        <dynamicField name="i_*" type="int" indexed="true" stored="true"/>
        """
        inserted_obj = self.conn['test']['test'].insert({
            'name': 'test_dynamic',
            'foo_i': 1,
            'i_foo': 1})
        self.assertEqual(self.conn['test']['test'].find().count(), 1)
        for _ in range(60):
            if len(self.connector.doc_manager._search("*:*")) != 0:
                break
            time.sleep(1)
        else:
            # BUG FIX: message said "removing"; this loop waits for docs
            # to APPEAR in Solr.
            self.fail("Timeout when waiting for docs to appear in Solr")
        result = self.connector.doc_manager.get_last_doc()
        self.assertIn('i_foo', result)
        self.assertIn('foo_i', result)
        self.assertEqual(len(self.connector.doc_manager._search(
            "i_foo:1")), 1)
        self.assertEqual(len(self.connector.doc_manager._search(
            "foo_i:1")), 1)
class TestSynchronizer(unittest.TestCase):
    """Integration tests replicating from MongoDB to a second mongod.

    Starts a standalone mongod on port 30000 as the replication target,
    plus a mongo cluster and a Connector wired to the mongo doc manager,
    then verifies inserts, removes, rollbacks, and stress loads are
    mirrored into the target instance.
    """

    def runTest(self):
        """ Runs the tests
        """
        unittest.TestCase.__init__(self)

    @classmethod
    def setUpClass(cls):
        """Start the target mongod and the source cluster."""
        os.system('rm %s; touch %s' % (CONFIG, CONFIG))
        start_single_mongod_instance("30000", "/MC", "MC_log")
        cls.mongo_doc = DocManager("%s:30000" % (HOSTNAME))
        cls.mongo_doc._remove()
        cls.flag = start_cluster()
        if cls.flag:
            cls.conn = Connection("%s:%s" % (HOSTNAME, PORTS_ONE['MONGOS']),
                                  replicaSet="demo-repl")

    @classmethod
    def tearDownClass(cls):
        """ Kills cluster instance
        """
        kill_mongo_proc(HOSTNAME, 30000)
        kill_all()

    def tearDown(self):
        """Shut the connector down after each test."""
        self.connector.join()

    def setUp(self):
        """Start a fresh Connector for each test and wait until the shard
        set is detected and both source and target collections are empty.
        """
        if not self.flag:
            self.fail("Shards cannot be added to mongos")
        self.connector = Connector(
            "%s:%s" % (HOSTNAME, PORTS_ONE["MONGOS"]), CONFIG,
            '%s:30000' % (HOSTNAME), ['test.test'], '_id', None,
            'mongo_connector/doc_managers/mongo_doc_manager.py')
        self.connector.start()
        # BUG FIX: was `pass`, a 100% CPU busy-spin; sleep like the other
        # synchronizer test classes do.
        while len(self.connector.shard_set) == 0:
            time.sleep(1)
        self.conn['test']['test'].remove(safe=True)
        while len(self.mongo_doc._search()) != 0:
            time.sleep(1)

    def test_shard_length(self):
        """Tests the shard_length to see if the shard set was recognized
        properly
        """
        self.assertEqual(len(self.connector.shard_set), 1)

    def test_initial(self):
        """Tests search and assures that the databases are clear.
        """
        self.conn['test']['test'].remove(safe=True)
        self.assertEqual(self.conn['test']['test'].find().count(), 0)
        self.assertEqual(len(self.mongo_doc._search()), 0)

    def test_insert(self):
        """Tests insert
        """
        self.conn['test']['test'].insert({'name': 'paulie'}, safe=True)
        while len(self.mongo_doc._search()) == 0:
            time.sleep(1)
        result_set_1 = self.mongo_doc._search()
        self.assertEqual(len(result_set_1), 1)
        result_set_2 = self.conn['test']['test'].find_one()
        for item in result_set_1:
            # the mongo doc manager preserves the native ObjectId
            self.assertEqual(item['_id'], result_set_2['_id'])
            self.assertEqual(item['name'], result_set_2['name'])

    def test_remove(self):
        """Tests remove
        """
        self.conn['test']['test'].insert({'name': 'paulie'}, safe=True)
        while len(self.mongo_doc._search()) != 1:
            time.sleep(1)
        self.conn['test']['test'].remove({'name': 'paulie'}, safe=True)
        while len(self.mongo_doc._search()) == 1:
            time.sleep(1)
        result_set_1 = self.mongo_doc._search()
        self.assertEqual(len(result_set_1), 0)

    def test_rollback(self):
        """Tests rollback. We force a rollback by adding a doc, killing the
        primary, adding another doc, killing the new primary, and then
        restarting both.
        """
        primary_conn = Connection(HOSTNAME, int(PORTS_ONE['PRIMARY']))

        self.conn['test']['test'].insert({'name': 'paul'}, safe=True)
        while self.conn['test']['test'].find({'name': 'paul'}).count() != 1:
            time.sleep(1)
        while len(self.mongo_doc._search()) != 1:
            time.sleep(1)

        kill_mongo_proc(HOSTNAME, PORTS_ONE['PRIMARY'])

        new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))
        admin = new_primary_conn['admin']
        # wait for the secondary to be elected primary
        while admin.command("isMaster")['ismaster'] is False:
            time.sleep(1)
        time.sleep(5)
        count = 0
        while True:
            try:
                # BUG FIX: dropped the dead assignment of insert()'s return
                # value (it was immediately overwritten below).
                self.conn['test']['test'].insert(
                    {'name': 'pauline'}, safe=True)
                break
            except OperationFailure:
                time.sleep(1)
                count += 1
                if count >= 60:
                    # BUG FIX: was sys.exit(1), which aborts the entire
                    # test runner; fail just this test instead.
                    self.fail('Call to insert failed too '
                              'many times in test_rollback')
                continue
        while len(self.mongo_doc._search()) != 2:
            time.sleep(1)
        result_set_1 = self.mongo_doc._search()
        result_set_2 = self.conn['test']['test'].find_one({'name': 'pauline'})
        # BUG FIX: was len(result_set_2) — the find_one() dict, which has
        # 2 keys and so passed vacuously. The target collection should
        # hold 2 documents here, matching the Elastic test's assertion.
        self.assertEqual(len(result_set_1), 2)
        # make sure pauline is there
        for item in result_set_1:
            if item['name'] == 'pauline':
                self.assertEqual(item['_id'], result_set_2['_id'])
        kill_mongo_proc(HOSTNAME, PORTS_ONE['SECONDARY'])

        # restart the original primary; 'pauline' should be rolled back
        start_mongo_proc(PORTS_ONE['PRIMARY'], "demo-repl", "/replset1a",
                         "/replset1a.log", None)
        while primary_conn['admin'].command("isMaster")['ismaster'] is False:
            time.sleep(1)

        start_mongo_proc(PORTS_ONE['SECONDARY'], "demo-repl", "/replset1b",
                         "/replset1b.log", None)
        time.sleep(2)
        result_set_1 = self.mongo_doc._search()
        self.assertEqual(len(result_set_1), 1)
        for item in result_set_1:
            self.assertEqual(item['name'], 'paul')
        find_cursor = retry_until_ok(self.conn['test']['test'].find)
        self.assertEqual(retry_until_ok(find_cursor.count), 1)

    def test_stress(self):
        """Test stress by inserting and removing the number of documents
        specified in global variable
        """
        for i in range(0, NUMBER_OF_DOC_DIRS):
            self.conn['test']['test'].insert({'name': 'Paul ' + str(i)})
        time.sleep(5)
        while len(self.mongo_doc._search()) != NUMBER_OF_DOC_DIRS:
            time.sleep(5)
        for i in range(0, NUMBER_OF_DOC_DIRS):
            result_set_1 = self.mongo_doc._search()
            for item in result_set_1:
                # NOTE(review): 'Paul' + str(i) lacks the space used at
                # insert time, so this branch never runs, and the assert
                # compares a value to itself — the loop only proves the
                # search call succeeds. TODO: confirm intended check.
                if item['name'] == 'Paul' + str(i):
                    self.assertEqual(item['_id'], item['_id'])

    def test_stressed_rollback(self):
        """Test stressed rollback with number of documents equal to specified
        in global variable. Strategy for rollback is the same as before.
        """
        while len(self.mongo_doc._search()) != 0:
            time.sleep(1)
        for i in range(0, NUMBER_OF_DOC_DIRS):
            self.conn['test']['test'].insert({'name': 'Paul ' + str(i)},
                                             safe=True)

        while len(self.mongo_doc._search()) != NUMBER_OF_DOC_DIRS:
            time.sleep(1)
        primary_conn = Connection(HOSTNAME, int(PORTS_ONE['PRIMARY']))
        kill_mongo_proc(HOSTNAME, PORTS_ONE['PRIMARY'])

        new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))
        admin = new_primary_conn['admin']
        while admin.command("isMaster")['ismaster'] is False:
            time.sleep(1)
        time.sleep(5)
        count = -1
        while count + 1 < NUMBER_OF_DOC_DIRS:
            try:
                count += 1
                self.conn['test']['test'].insert(
                    {'name': 'Pauline ' + str(count)}, safe=True)
            except (OperationFailure, AutoReconnect):
                time.sleep(1)
        while (len(self.mongo_doc._search())
                != self.conn['test']['test'].find().count()):
            time.sleep(1)
        result_set_1 = self.mongo_doc._search()
        for item in result_set_1:
            if 'Pauline' in item['name']:
                result_set_2 = self.conn['test']['test'].find_one(
                    {'name': item['name']})
                self.assertEqual(item['_id'], result_set_2['_id'])

        kill_mongo_proc(HOSTNAME, PORTS_ONE['SECONDARY'])

        start_mongo_proc(PORTS_ONE['PRIMARY'], "demo-repl", "/replset1a",
                         "/replset1a.log", None)
        while primary_conn['admin'].command("isMaster")['ismaster'] is False:
            time.sleep(1)
        time.sleep(1)
        start_mongo_proc(PORTS_ONE['SECONDARY'], "demo-repl", "/replset1b",
                         "/replset1b.log", None)

        # all 'Pauline' docs should be rolled back away
        while len(self.mongo_doc._search()) != NUMBER_OF_DOC_DIRS:
            time.sleep(5)
        result_set_1 = self.mongo_doc._search()
        self.assertEqual(len(result_set_1), NUMBER_OF_DOC_DIRS)
        for item in result_set_1:
            self.assertTrue('Paul' in item['name'])
        find_cursor = retry_until_ok(self.conn['test']['test'].find)
        self.assertEqual(retry_until_ok(find_cursor.count),
                         NUMBER_OF_DOC_DIRS)
class TestElastic(unittest.TestCase):
    """Integration tests replicating from MongoDB to Elasticsearch.

    Starts a mongo cluster and a Connector wired to the Elastic doc
    manager, then verifies inserts, removes, rollbacks, stress loads,
    and non-standard BSON types are mirrored into the Elastic index.
    """

    def runTest(self):
        """ Runs the tests
        """
        unittest.TestCase.__init__(self)

    @classmethod
    def setUpClass(cls):
        """ Starts the cluster
        """
        os.system('rm %s; touch %s' % (CONFIG, CONFIG))
        cls.elastic_doc = DocManager('http://localhost:9200',
                                     auto_commit=False)
        cls.elastic_doc._remove()
        cls.flag = start_cluster()
        if cls.flag:
            cls.conn = Connection('%s:%s' % (HOSTNAME, PORTS_ONE['MONGOS']),
                                  replicaSet="demo-repl")
        import logging
        logger = logging.getLogger()
        loglevel = logging.INFO
        logger.setLevel(loglevel)

    @classmethod
    def tearDownClass(cls):
        """ Kills cluster instance
        """
        kill_all()

    def tearDown(self):
        """ Ends the connector
        """
        self.connector.doc_manager.auto_commit = False
        time.sleep(2)
        self.connector.join()

    def setUp(self):
        """ Starts a new connector for every test
        """
        if not self.flag:
            self.fail("Shards cannot be added to mongos")
        self.connector = Connector(
            '%s:%s' % (HOSTNAME, PORTS_ONE['MONGOS']), CONFIG,
            'http://localhost:9200', ['test.test'], '_id', None,
            'mongo_connector/doc_managers/elastic_doc_manager.py')
        self.connector.start()
        # BUG FIX: was `pass`, a 100% CPU busy-spin; sleep like the other
        # synchronizer test classes do.
        while len(self.connector.shard_set) == 0:
            time.sleep(1)
        self.conn['test']['test'].remove(safe=True)
        while len(self.elastic_doc._search()) != 0:
            time.sleep(1)

    def test_shard_length(self):
        """Tests the shard_length to see if the shard set was recognized
        properly
        """
        self.assertEqual(len(self.connector.shard_set), 1)

    def test_initial(self):
        """Tests search and assures that the databases are clear.
        """
        self.conn['test']['test'].remove(safe=True)
        self.assertEqual(self.conn['test']['test'].find().count(), 0)
        self.assertEqual(len(self.elastic_doc._search()), 0)

    def test_insert(self):
        """Tests insert
        """
        self.conn['test']['test'].insert({'name': 'paulie'}, safe=True)
        while len(self.elastic_doc._search()) == 0:
            time.sleep(1)
        result_set_1 = self.elastic_doc._search()
        self.assertEqual(len(result_set_1), 1)
        result_set_2 = self.conn['test']['test'].find_one()
        for item in result_set_1:
            # Elastic stores _id as a string, so compare against str()
            self.assertEqual(item['_id'], str(result_set_2['_id']))
            self.assertEqual(item['name'], result_set_2['name'])

    def test_remove(self):
        """Tests remove
        """
        self.conn['test']['test'].insert({'name': 'paulie'}, safe=True)
        while len(self.elastic_doc._search()) != 1:
            time.sleep(1)
        self.conn['test']['test'].remove({'name': 'paulie'}, safe=True)
        while len(self.elastic_doc._search()) == 1:
            time.sleep(1)
        result_set_1 = self.elastic_doc._search()
        self.assertEqual(len(result_set_1), 0)

    def test_rollback(self):
        """Tests rollback. We force a rollback by adding a doc, killing the
        primary, adding another doc, killing the new primary, and then
        restarting both.
        """
        primary_conn = Connection(HOSTNAME, int(PORTS_ONE['PRIMARY']))

        self.conn['test']['test'].insert({'name': 'paul'}, safe=True)
        while self.conn['test']['test'].find({'name': 'paul'}).count() != 1:
            time.sleep(1)
        while len(self.elastic_doc._search()) != 1:
            time.sleep(1)

        kill_mongo_proc(HOSTNAME, PORTS_ONE['PRIMARY'])

        new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))
        admin = new_primary_conn['admin']
        # wait for the secondary to be elected primary
        while admin.command("isMaster")['ismaster'] is False:
            time.sleep(1)
        time.sleep(5)
        count = 0
        while True:
            try:
                self.conn['test']['test'].insert(
                    {'name': 'pauline'}, safe=True)
                break
            except OperationFailure:
                time.sleep(1)
                count += 1
                if count >= 60:
                    # BUG FIX: was sys.exit(1), which aborts the entire
                    # test runner; fail just this test instead.
                    self.fail('Call to insert failed too '
                              'many times in test_rollback')
                continue
        while len(self.elastic_doc._search()) != 2:
            time.sleep(1)
        result_set_1 = self.elastic_doc._search()
        result_set_2 = self.conn['test']['test'].find_one({'name': 'pauline'})
        self.assertEqual(len(result_set_1), 2)
        # make sure pauline is there
        for item in result_set_1:
            if item['name'] == 'pauline':
                self.assertEqual(item['_id'], str(result_set_2['_id']))
        kill_mongo_proc(HOSTNAME, PORTS_ONE['SECONDARY'])

        # restart the original primary; 'pauline' should be rolled back
        start_mongo_proc(PORTS_ONE['PRIMARY'], "demo-repl", "/replset1a",
                         "/replset1a.log", None)
        while primary_conn['admin'].command("isMaster")['ismaster'] is False:
            time.sleep(1)

        start_mongo_proc(PORTS_ONE['SECONDARY'], "demo-repl", "/replset1b",
                         "/replset1b.log", None)
        time.sleep(2)
        result_set_1 = self.elastic_doc._search()
        self.assertEqual(len(result_set_1), 1)
        for item in result_set_1:
            self.assertEqual(item['name'], 'paul')
        find_cursor = retry_until_ok(self.conn['test']['test'].find)
        self.assertEqual(retry_until_ok(find_cursor.count), 1)

    def test_stress(self):
        """Test stress by inserting and removing the number of documents
        specified in global variable
        """
        for i in range(0, NUMBER_OF_DOC_DIRS):
            self.conn['test']['test'].insert({'name': 'Paul ' + str(i)})
        time.sleep(5)
        while len(self.elastic_doc._search()) != NUMBER_OF_DOC_DIRS:
            time.sleep(5)
        for i in range(0, NUMBER_OF_DOC_DIRS):
            result_set_1 = self.elastic_doc._search()
            for item in result_set_1:
                # NOTE(review): 'Paul' + str(i) lacks the space used at
                # insert time, so this branch never runs, and the assert
                # compares a value to itself — the loop only proves the
                # search call succeeds. TODO: confirm intended check.
                if item['name'] == 'Paul' + str(i):
                    self.assertEqual(item['_id'], item['_id'])

    def test_stressed_rollback(self):
        """Test stressed rollback with number of documents equal to specified
        in global variable. Strategy for rollback is the same as before.
        """
        while len(self.elastic_doc._search()) != 0:
            time.sleep(1)
        for i in range(0, NUMBER_OF_DOC_DIRS):
            self.conn['test']['test'].insert({'name': 'Paul ' + str(i)},
                                             safe=True)

        while len(self.elastic_doc._search()) != NUMBER_OF_DOC_DIRS:
            time.sleep(1)
        primary_conn = Connection(HOSTNAME, int(PORTS_ONE['PRIMARY']))
        kill_mongo_proc(HOSTNAME, PORTS_ONE['PRIMARY'])

        new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))
        admin = new_primary_conn['admin']
        while admin.command("isMaster")['ismaster'] is False:
            time.sleep(1)
        time.sleep(5)
        count = -1
        while count + 1 < NUMBER_OF_DOC_DIRS:
            try:
                count += 1
                self.conn['test']['test'].insert(
                    {'name': 'Pauline ' + str(count)}, safe=True)
            except (OperationFailure, AutoReconnect):
                time.sleep(1)
        while (len(self.elastic_doc._search())
                != self.conn['test']['test'].find().count()):
            time.sleep(1)
        result_set_1 = self.elastic_doc._search()
        for item in result_set_1:
            if 'Pauline' in item['name']:
                result_set_2 = self.conn['test']['test'].find_one(
                    {'name': item['name']})
                self.assertEqual(item['_id'], str(result_set_2['_id']))

        kill_mongo_proc(HOSTNAME, PORTS_ONE['SECONDARY'])

        start_mongo_proc(PORTS_ONE['PRIMARY'], "demo-repl", "/replset1a",
                         "/replset1a.log", None)
        while primary_conn['admin'].command("isMaster")['ismaster'] is False:
            time.sleep(1)
        time.sleep(1)
        start_mongo_proc(PORTS_ONE['SECONDARY'], "demo-repl", "/replset1b",
                         "/replset1b.log", None)

        # all 'Pauline' docs should be rolled back away
        while len(self.elastic_doc._search()) != NUMBER_OF_DOC_DIRS:
            time.sleep(5)
        result_set_1 = self.elastic_doc._search()
        self.assertEqual(len(result_set_1), NUMBER_OF_DOC_DIRS)
        for item in result_set_1:
            self.assertTrue('Paul' in item['name'])
        find_cursor = retry_until_ok(self.conn['test']['test'].find)
        self.assertEqual(retry_until_ok(find_cursor.count),
                         NUMBER_OF_DOC_DIRS)

    def test_non_standard_fields(self):
        """ Tests ObjectIds, DBrefs, etc
        """
        # This test can break if it attempts to insert before the dump takes
        # place- this prevents it (other tests affected too actually)
        while self.connector.shard_set['demo-repl'].checkpoint is None:
            time.sleep(1)
        docs = [
            {'foo': [1, 2]},
            {'bar': {'hello': 'world'}},
            {'code': Code("function x() { return 1; }")},
            {'dbref': {'_ref': DBRef('simple',
                                     ObjectId('509b8db456c02c5ab7e63c34'))}}
        ]
        try:
            self.conn['test']['test'].insert(docs)
        except OperationFailure:
            self.fail("Cannot insert documents into Elastic!")
        for _ in range(1, 60):
            if len(self.elastic_doc._search()) == len(docs):
                break
            time.sleep(1)
        else:
            self.fail("Did not get all expected documents")
        self.assertIn("dbref", self.elastic_doc.get_last_doc())