def test_connector_minimum_privileges(self):
    """Verify the Connector works for a user with minimum privileges.

    Creates a read-only user on the 'test', 'wildcard', and 'local'
    databases, inserts documents into replicated and non-replicated
    namespaces, and checks that the doc manager receives documents.
    """
    if not (db_user and db_password):
        raise SkipTest('Need to set a user/password to test this.')
    client = self.repl_set.client()
    restricted_user = '******'
    restricted_pwd = 'password'
    read_roles = [{'role': 'read', 'db': db_name}
                  for db_name in ('test', 'wildcard', 'local')]
    client.admin.add_user(restricted_user, restricted_pwd, roles=read_roles)
    # Only test.test and wildcard.* are configured for replication below.
    client.test.test.insert_one({"replicated": 1})
    client.test.ignored.insert_one({"replicated": 0})
    client.ignored.ignored.insert_one({"replicated": 0})
    client.wildcard.test.insert_one({"replicated": 1})
    conn = Connector(
        mongo_address=self.repl_set.primary.uri,
        auth_username=restricted_user,
        auth_key=restricted_pwd,
        namespace_options={'test.test': True, 'wildcard.*': True}
    )
    conn.start()
    try:
        assert_soon(conn.doc_managers[0]._search)
    finally:
        # Always stop the connector threads, even if the wait fails.
        conn.join()
def setUp(self):
    """Start a fresh Connector against Solr and wait for a clean state.

    Fails fast if the cluster fixture did not come up, then drains the
    'test.test' collection and waits for Solr to report zero documents.
    """
    if not self.flag:
        self.fail(self.err_msg)
    self.connector = Connector(
        address=('%s:%s' % (HOSTNAME, PORTS_ONE['MAIN'])),
        oplog_checkpoint=CONFIG,
        target_url='http://localhost:8983/solr',
        ns_set=['test.test'],
        u_key='_id',
        auth_key=None,
        doc_manager='mongo_connector/doc_managers/solr_doc_manager.py')
    self.connector.start()
    while len(self.connector.shard_set) == 0:
        time.sleep(1)
    count = 0
    while True:
        try:
            self.conn['test']['test'].remove(safe=True)
            break
        except (AutoReconnect, OperationFailure):
            time.sleep(1)
            count += 1
            if count > 60:
                # BUG FIX: the original merely instantiated
                # unittest.SkipTest without raising it, so the retry
                # loop could spin forever. It must be raised to abort.
                raise unittest.SkipTest('Call to remove failed too '
                                        'many times in setup')
    # Wait for Solr to drain before running the test body.
    while len(self.solr_conn.search('*:*')) != 0:
        time.sleep(1)
def setUpClass(cls):
    """Initialize the cluster and start a Connector against mongos."""
    # FIX: recreate the checkpoint file with the stdlib instead of
    # shelling out via os.system('rm ...; touch ...'), which is
    # unportable and silently ignores failures.
    try:
        os.unlink(CONFIG)
    except OSError:
        pass
    open(CONFIG, 'w').close()
    use_mongos = PORTS_ONE['MONGOS'] == "27217"
    cls.flag = start_cluster(use_mongos=use_mongos)
    if cls.flag:
        cls.conn = Connection('%s:%s' % (HOSTNAME, PORTS_ONE['MONGOS']),
                              replicaSet="demo-repl")
        # Abort the whole test if startup takes longer than a minute.
        timer = Timer(60, abort_test)
        cls.connector = Connector(
            address="%s:%s" % (HOSTNAME, PORTS_ONE["MONGOS"]),
            oplog_checkpoint=CONFIG,
            target_url=None,
            ns_set=['test.test'],
            u_key='_id',
            auth_key=None)
        cls.synchronizer = cls.connector.doc_managers[0]
        timer.start()
        cls.connector.start()
        while len(cls.connector.shard_set) == 0:
            # FIX: sleep instead of busy-spinning at 100% CPU while
            # the oplog threads come up.
            time.sleep(1)
        timer.cancel()
def setUp(self):
    """Starts a new connector for every test."""
    try:
        os.unlink("config.txt")
    except OSError:
        pass
    open("config.txt", "w").close()
    self.connector = Connector(
        address='%s:%s' % (mongo_host, self.primary_p),
        oplog_checkpoint='config.txt',
        target_url=elastic_pair,
        ns_set=['test.test'],
        u_key='_id',
        auth_key=None,
        doc_manager='mongo_connector/doc_managers/elastic_doc_manager.py',
        auto_commit_interval=0)
    # Clean out test databases
    try:
        self.elastic_doc._remove()
    except OperationFailed:
        try:
            # Create test.test index if necessary
            es_client = Elasticsearch(hosts=[elastic_pair])
            index_client = IndicesClient(es_client)
            index_client.create(index='test.test')
        except es_exceptions.TransportError:
            pass
    self.conn.test.test.drop()
    self.connector.start()
    assert_soon(lambda: bool(self.connector.shard_set))
    assert_soon(lambda: not any(True for _ in self.elastic_doc._search()))
def setUp(self):
    """Spin up a one-shard cluster and a running Connector per test."""
    auth_args = {}
    if db_user and db_password:
        auth_args = {'auth_username': db_user, 'auth_key': db_password}
    self.cluster = ShardedClusterSingle().start()
    self.dm = DocManager()
    self.connector = Connector(mongo_address=self.cluster.uri,
                               doc_managers=[self.dm],
                               **auth_args)
    self.connector.start()
def setUp(self):
    """Start a Connector replicating into a standalone MongoDB target."""
    target = '%s:%d' % (mongo_host, self.standalone_port)
    self.connector = Connector(
        address='%s:%s' % (mongo_host, self.primary_p),
        oplog_checkpoint="config.txt",
        target_url=target,
        ns_set=['test.test'],
        u_key='_id',
        auth_key=None,
        doc_manager='mongo_connector/doc_managers/mongo_doc_manager.py')
    self.connector.start()
    assert_soon(lambda: bool(self.connector.shard_set))
    self.conn['test']['test'].remove()
    # Wait until the target is fully drained before the test begins.
    assert_soon(lambda: not any(True for _ in self.mongo_doc._search()))
def setUpClass(cls):
    """Start a sharded cluster and a Connector with primaryPreferred reads."""
    if db_user and db_password:
        auth_args = {'auth_username': db_user, 'auth_key': db_password}
    else:
        auth_args = {}
    cls.cluster = ShardedClusterSingle().start()
    cls.main_uri = cls.cluster.uri + '/?readPreference=primaryPreferred'
    cls.dm = DocManager()
    cls.connector = Connector(mongo_address=cls.main_uri,
                              doc_managers=[cls.dm],
                              **auth_args)
    cls.connector.start()
    # Both shards must be discovered before any test runs.
    assert_soon(lambda: len(cls.connector.shard_set) == 2,
                message='connector failed to find both shards!')
def test_start_with_auth(self):
    """The Connector should replicate from an auth-enabled cluster."""
    dm = DocManager()
    connector = Connector(mongo_address=self.cluster.uri,
                          doc_managers=[dm],
                          auth_username=db_user,
                          auth_key=db_password)
    connector.start()
    try:
        # Insert some documents into the sharded cluster. These
        # should go to the DocManager, and the connector should not
        # have an auth failure.
        self.cluster.client().test.test.insert_one({'auth_failure': False})
        assert_soon(lambda: len(dm._search()) > 0)
    finally:
        # FIX: join in a finally block so a failed assertion does not
        # leak connector threads (matches the try/finally pattern used
        # by the other connector tests in this suite).
        connector.join()
def setUp(self):
    """Start a Connector for each test once the cluster is confirmed up."""
    if not self.flag:
        self.fail("Shards cannot be added to mongos")
    self.connector = Connector(
        address="%s:%s" % (HOSTNAME, PORTS_ONE["MONGOS"]),
        oplog_checkpoint=CONFIG,
        target_url='%s:30000' % (HOSTNAME),
        ns_set=['test.test'],
        u_key='_id',
        auth_key=None,
        doc_manager='mongo_connector/doc_managers/mongo_doc_manager.py')
    self.connector.start()
    while len(self.connector.shard_set) == 0:
        # FIX: sleep instead of busy-spinning at 100% CPU while the
        # oplog threads start up.
        time.sleep(1)
    self.conn['test']['test'].remove(safe=True)
    wait_for(lambda: sum(1 for _ in self.mongo_doc._search()) == 0)
def test_connector(self):
    """Test whether the connector initiates properly."""
    conn = Connector(mongo_address=self.repl_set.uri,
                     ns_set=['test.test'],
                     **connector_opts)
    conn.start()
    # Wait until the single oplog thread has been detected.
    while len(conn.shard_set) != 1:
        time.sleep(2)
    conn.join()
    self.assertFalse(conn.can_run)
    time.sleep(5)
    # After join(), every oplog thread must have stopped.
    for oplog_thread in conn.shard_set.values():
        self.assertFalse(oplog_thread.running)
def test_read_oplog_progress(self):
    """Test read_oplog_progress with no file, an empty file, and data."""
    conn = Connector(address='%s:%d' % (mongo_host, self.primary_p),
                     oplog_checkpoint=None,
                     target_url=None,
                     ns_set=['test.test'],
                     u_key='_id',
                     auth_key=None)
    # testing with no file
    self.assertEqual(conn.read_oplog_progress(), None)
    try:
        os.unlink("temp_config.txt")
    except OSError:
        pass
    open("temp_config.txt", "w").close()
    conn.oplog_checkpoint = "temp_config.txt"
    # testing with empty file
    self.assertEqual(conn.read_oplog_progress(), None)
    oplog_dict = conn.oplog_progress.get_dict()
    # add a value to the file, delete the dict, and then read in the value
    oplog_dict['oplog1'] = Timestamp(12, 34)
    conn.write_oplog_progress()
    del oplog_dict['oplog1']
    self.assertEqual(len(oplog_dict), 0)
    conn.read_oplog_progress()
    oplog_dict = conn.oplog_progress.get_dict()
    self.assertTrue('oplog1' in oplog_dict.keys())
    # BUG FIX: two-arg assertTrue(a, b) treats b as the failure message
    # and passes for any truthy a; an equality check was intended.
    self.assertEqual(oplog_dict['oplog1'], Timestamp(12, 34))
    oplog_dict['oplog1'] = Timestamp(55, 11)
    # see if oplog progress dict is properly updated
    conn.read_oplog_progress()
    self.assertEqual(oplog_dict['oplog1'], Timestamp(55, 11))
    os.unlink("temp_config.txt")
def test_read_oplog_progress(self):
    """Test read_oplog_progress with no file, an empty file, and data."""
    conn = Connector(
        mongo_address=self.repl_set.uri,
        oplog_checkpoint=None,
        **connector_opts
    )
    # testing with no file
    self.assertEqual(conn.read_oplog_progress(), None)
    try:
        os.unlink("temp_oplog.timestamp")
    except OSError:
        pass
    open("temp_oplog.timestamp", "w").close()
    conn.oplog_checkpoint = "temp_oplog.timestamp"
    # testing with empty file
    self.assertEqual(conn.read_oplog_progress(), None)
    oplog_dict = conn.oplog_progress.get_dict()
    # add a value to the file, delete the dict, and then read in the value
    oplog_dict['oplog1'] = Timestamp(12, 34)
    conn.write_oplog_progress()
    del oplog_dict['oplog1']
    self.assertEqual(len(oplog_dict), 0)
    conn.read_oplog_progress()
    oplog_dict = conn.oplog_progress.get_dict()
    self.assertTrue('oplog1' in oplog_dict.keys())
    # BUG FIX: two-arg assertTrue(a, b) treats b as the failure message
    # and passes for any truthy a; an equality check was intended.
    self.assertEqual(oplog_dict['oplog1'], Timestamp(12, 34))
    oplog_dict['oplog1'] = Timestamp(55, 11)
    # see if oplog progress dict is properly updated
    conn.read_oplog_progress()
    self.assertEqual(oplog_dict['oplog1'], Timestamp(55, 11))
    os.unlink("temp_oplog.timestamp")
def setUp(self):
    """Reset the checkpoint and start a Connector with gridfs enabled."""
    try:
        os.unlink("oplog.timestamp")
    except OSError:
        pass
    self._remove()
    self.connector = Connector(mongo_address=self.repl_set.uri,
                               ns_set=['test.test'],
                               doc_managers=(self.mongo_doc,),
                               gridfs_set=['test.test'],
                               **connector_opts)
    self.conn.drop_database('test')
    self.connector.start()
    assert_soon(lambda: bool(self.connector.shard_set))
    # The target must be empty before the test body runs.
    assert_soon(lambda: not any(True for _ in self._search()))
def test_connector(self):
    """Test whether the connector initiates properly."""
    conn = Connector(mongo_address=self.repl_set.uri, **connector_opts)
    conn.start()
    assert_soon(lambda: bool(conn.shard_set))
    # Make sure get_mininum_mongodb_version returns the current version.
    current_version = Version.from_client(self.repl_set.client())
    self.assertEqual(current_version, get_mininum_mongodb_version())
    conn.join()
    # Make sure the connector is shutdown correctly
    self.assertFalse(conn.can_run)
    for oplog_thread in conn.shard_set.values():
        self.assertFalse(oplog_thread.running)
def setUpClass(cls):
    """Initializes the cluster."""
    try:
        os.unlink("oplog.timestamp")
    except OSError:
        pass
    open("oplog.timestamp", "w").close()
    cls.repl_set = ReplicaSet().start()
    cls.conn = cls.repl_set.client()
    cls.connector = Connector(mongo_address=cls.repl_set.uri,
                              ns_set=['test.test'],
                              **connector_opts)
    cls.synchronizer = cls.connector.doc_managers[0]
    cls.connector.start()
    # Wait for at least one oplog thread before any test runs.
    assert_soon(lambda: bool(cls.connector.shard_set))
def setUp(self):
    """Clear Solr and MongoDB state, then start a fresh Connector."""
    self._remove()
    try:
        os.unlink("oplog.timestamp")
    except OSError:
        pass
    open("oplog.timestamp", "w").close()
    docman = DocManager(solr_url, auto_commit_interval=0)
    self.connector = Connector(mongo_address=self.repl_set.uri,
                               ns_set=['test.test'],
                               doc_managers=(docman,),
                               gridfs_set=['test.test'])
    # Drop the test collection and its GridFS backing collections.
    for collection in (self.conn.test.test,
                       self.conn.test.test.files,
                       self.conn.test.test.chunks):
        retry_until_ok(collection.drop)
    self._remove()
    self.connector.start()
    assert_soon(lambda: bool(self.connector.shard_set))
def setUp(self):
    """Starts a new connector for every test."""
    if not self.flag:
        self.fail("Shards cannot be added to mongos")
    self.connector = Connector(
        address='%s:%s' % (HOSTNAME, PORTS_ONE['MONGOS']),
        oplog_checkpoint=CONFIG,
        target_url='localhost:9200',
        ns_set=['test.test'],
        u_key='_id',
        auth_key=None,
        doc_manager='mongo_connector/doc_managers/elastic_doc_manager.py')
    self.connector.start()
    while len(self.connector.shard_set) == 0:
        # FIX: sleep instead of busy-spinning at 100% CPU while
        # waiting for the oplog threads to start.
        time.sleep(1)
    self.conn['test']['test'].remove(safe=True)
    wait_for(lambda: sum(1 for _ in self.elastic_doc._search()) == 0)
def test_connector_minimum_privileges(self):
    """The Connector should work for a user with minimum privileges."""
    if not (db_user and db_password):
        raise SkipTest("Need to set a user/password to test this.")
    client = self.repl_set.client()
    limited_user = "******"
    limited_pwd = "password"
    roles = [{"role": "read", "db": db_name}
             for db_name in ("test", "wildcard", "local")]
    client.admin.add_user(limited_user, limited_pwd, roles=roles)
    # Only test.test and wildcard.* should end up replicated.
    for db_name, coll_name, flag in (("test", "test", 1),
                                     ("test", "ignored", 0),
                                     ("ignored", "ignored", 0),
                                     ("wildcard", "test", 1)):
        client[db_name][coll_name].insert_one({"replicated": flag})
    conn = Connector(
        mongo_address=self.repl_set.primary.uri,
        auth_username=limited_user,
        auth_key=limited_pwd,
        namespace_options={"test.test": True, "wildcard.*": True},
    )
    conn.start()
    try:
        assert_soon(conn.doc_managers[0]._search)
    finally:
        conn.join()
def setUp(self):
    """Start a new Connector for each test."""
    super(TestElastic, self).setUp()
    try:
        os.unlink("oplog.timestamp")
    except OSError:
        pass
    self.connector = Connector(mongo_address=self.repl_set.uri,
                               ns_set=['test.test'],
                               doc_managers=(self.elastic_doc,),
                               gridfs_set=['test.test'])
    # Drop the replicated collection along with its GridFS collections.
    for collection in (self.conn.test.test,
                       self.conn.test.test.files,
                       self.conn.test.test.chunks):
        collection.drop()
    self.connector.start()
    assert_soon(lambda: bool(self.connector.shard_set))
    assert_soon(lambda: self._count() == 0)
def test_connector(self):
    """Test whether the connector initiates properly."""
    conn = Connector(address='%s:%d' % (mongo_host, self.primary_p),
                     oplog_checkpoint='config.txt',
                     target_url=None,
                     ns_set=['test.test'],
                     u_key='_id',
                     auth_key=None)
    conn.start()
    # Wait until the single oplog thread has been detected.
    while len(conn.shard_set) != 1:
        time.sleep(2)
    conn.join()
    self.assertFalse(conn.can_run)
    time.sleep(5)
    # After join(), every oplog thread must have stopped.
    for oplog_thread in conn.shard_set.values():
        self.assertFalse(oplog_thread.running)
def setUp(self):
    """Recreate the checkpoint file and start a Connector against Solr."""
    try:
        os.unlink("config.txt")
    except OSError:
        pass
    open("config.txt", "w").close()
    self.connector = Connector(
        address='%s:%s' % (mongo_host, self.primary_p),
        oplog_checkpoint='config.txt',
        target_url='http://localhost:8983/solr',
        ns_set=['test.test'],
        u_key='_id',
        auth_key=None,
        doc_manager='mongo_connector/doc_managers/solr_doc_manager.py',
        auto_commit_interval=0)
    self.connector.start()
    assert_soon(lambda: bool(self.connector.shard_set))
    retry_until_ok(self.conn.test.test.remove)
    # Solr must report an empty index before the test body runs.
    assert_soon(lambda: not any(True for _ in self.solr_conn.search('*:*')))
def test_read_oplog_progress(self):
    """Test read_oplog_progress with no file, an empty file, and data."""
    conn = Connector(address=MAIN_ADDRESS,
                     oplog_checkpoint=None,
                     target_url=None,
                     ns_set=['test.test'],
                     u_key='_id',
                     auth_key=None)
    # testing with no file
    self.assertEqual(conn.read_oplog_progress(), None)
    # FIX: create the empty checkpoint file with the stdlib instead of
    # shelling out via os.system('touch ...'), which is unportable and
    # ignores errors.
    open(TEMP_CONFIG, 'w').close()
    conn.oplog_checkpoint = TEMP_CONFIG
    # testing with empty file
    self.assertEqual(conn.read_oplog_progress(), None)
    oplog_dict = conn.oplog_progress.get_dict()
    # add a value to the file, delete the dict, and then read in the value
    oplog_dict['oplog1'] = Timestamp(12, 34)
    conn.write_oplog_progress()
    del oplog_dict['oplog1']
    self.assertEqual(len(oplog_dict), 0)
    conn.read_oplog_progress()
    self.assertTrue('oplog1' in oplog_dict.keys())
    # BUG FIX: two-arg assertTrue(a, b) treats b as the failure message
    # and passes for any truthy a; an equality check was intended.
    self.assertEqual(oplog_dict['oplog1'], Timestamp(12, 34))
    oplog_dict['oplog1'] = Timestamp(55, 11)
    # see if oplog progress dict is properly updated
    conn.read_oplog_progress()
    self.assertEqual(oplog_dict['oplog1'], Timestamp(55, 11))
    # FIX: remove the file with os.unlink instead of os.system('rm ...').
    os.unlink(TEMP_CONFIG)
def test_write_oplog_progress(self):
    """Test write_oplog_progress under several circumstances."""
    try:
        os.unlink("temp_config.txt")
    except OSError:
        pass
    open("temp_config.txt", "w").close()
    conn = Connector(address='%s:%d' % (mongo_host, self.primary_p),
                     oplog_checkpoint="temp_config.txt",
                     target_url=None,
                     ns_set=['test.test'],
                     u_key='_id',
                     auth_key=None)
    # test that None is returned if there is no config file specified.
    self.assertEqual(conn.write_oplog_progress(), None)
    conn.oplog_progress.get_dict()[1] = Timestamp(12, 34)
    # pretend to insert a thread/timestamp pair
    conn.write_oplog_progress()
    # FIX: close the checkpoint file deterministically instead of
    # leaking the handle from json.load(open(...)).
    with open("temp_config.txt", 'r') as config_file:
        data = json.load(config_file)
    self.assertEqual(1, int(data[0]))
    self.assertEqual(long_to_bson_ts(int(data[1])), Timestamp(12, 34))
    # ensure the temp file was deleted
    self.assertFalse(os.path.exists("temp_config.txt" + '~'))
    # ensure that updates work properly
    conn.oplog_progress.get_dict()[1] = Timestamp(44, 22)
    conn.write_oplog_progress()
    with open("temp_config.txt", 'r') as config_file:
        data = json.load(config_file)
    self.assertEqual(1, int(data[0]))
    self.assertEqual(long_to_bson_ts(int(data[1])), Timestamp(44, 22))
    os.unlink("temp_config.txt")
def setUp(self):
    """Start a new Connector for each test."""
    try:
        os.unlink("oplog.timestamp")
    except OSError:
        pass
    open("oplog.timestamp", "w").close()
    docman = DocManager('http://localhost:7474/db/data',
                        auto_commit_interval=0)
    self.connector = Connector(mongo_address=self.repl_set.uri,
                               ns_set=['test.test'],
                               doc_managers=(docman,),
                               gridfs_set=['test.test'])
    # Drop the replicated collection along with its GridFS collections.
    for collection in (self.conn.test.test,
                       self.conn.test.test.files,
                       self.conn.test.test.chunks):
        collection.drop()
    self.connector.start()
    self.neo4j_conn.delete_all()
    assert_soon(lambda: bool(self.connector.shard_set))
    assert_soon(lambda: self._count() == 0)
    # Give Neo4j a moment to settle before the test body runs.
    time.sleep(5)
def test_write_oplog_progress(self):
    """Test write_oplog_progress under several circumstances."""
    try:
        os.unlink("temp_oplog.timestamp")
    except OSError:
        pass
    open("temp_oplog.timestamp", "w").close()
    conn = Connector(
        mongo_address=self.repl_set.uri,
        oplog_checkpoint="temp_oplog.timestamp",
        **connector_opts
    )
    # test that None is returned if there is no config file specified.
    self.assertEqual(conn.write_oplog_progress(), None)
    conn.oplog_progress.get_dict()[1] = Timestamp(12, 34)
    # pretend to insert a thread/timestamp pair
    conn.write_oplog_progress()
    # FIX: close the checkpoint file deterministically instead of
    # leaking the handle from json.load(open(...)).
    with open("temp_oplog.timestamp", 'r') as config_file:
        data = json.load(config_file)
    self.assertEqual(1, int(data[0]))
    self.assertEqual(long_to_bson_ts(int(data[1])), Timestamp(12, 34))
    # ensure the temp file was deleted
    self.assertFalse(os.path.exists("temp_oplog.timestamp" + '~'))
    # ensure that updates work properly
    conn.oplog_progress.get_dict()[1] = Timestamp(44, 22)
    conn.write_oplog_progress()
    with open("temp_oplog.timestamp", 'r') as config_file:
        data = json.load(config_file)
    self.assertEqual(1, int(data[0]))
    self.assertEqual(long_to_bson_ts(int(data[1])), Timestamp(44, 22))
    os.unlink("temp_oplog.timestamp")
def setUpClass(cls):
    """Initializes the cluster."""
    try:
        os.unlink("config.txt")
    except OSError:
        pass
    open("config.txt", "w").close()
    _, _, cls.primary_p = start_replica_set('test-synchronizer')
    cls.conn = MongoClient('%s:%d' % (mongo_host, cls.primary_p),
                           replicaSet='test-synchronizer')
    cls.connector = Connector(
        address='%s:%d' % (mongo_host, cls.primary_p),
        oplog_checkpoint='config.txt',
        target_url=None,
        ns_set=['test.test'],
        u_key='_id',
        auth_key=None)
    cls.synchronizer = cls.connector.doc_managers[0]
    cls.connector.start()
    # Wait for at least one oplog thread before any test runs.
    assert_soon(lambda: bool(cls.connector.shard_set))
def test_connector(self):
    """Test whether the connector initiates properly."""
    if not self.flag:
        self.fail("Shards cannot be added to mongos")
    conn = Connector(address=MAIN_ADDRESS,
                     oplog_checkpoint=CONFIG,
                     target_url=None,
                     ns_set=['test.test'],
                     u_key='_id',
                     auth_key=None)
    conn.start()
    # Wait until the single oplog thread has been detected.
    while len(conn.shard_set) != 1:
        time.sleep(2)
    conn.join()
    self.assertFalse(conn.can_run)
    time.sleep(5)
    # After join(), every oplog thread must have stopped.
    for oplog_thread in conn.shard_set.values():
        self.assertFalse(oplog_thread.running)
def setUp(self):
    """Start a replica set and a Connector with gridfs and rename options."""
    self.repl_set = self.replica_set_class().start()
    self.conn = self.repl_set.client()
    try:
        os.unlink("oplog.timestamp")
    except OSError:
        pass
    self._remove()
    namespace_options = {
        "test.test": {"gridfs": True},
        "rename.me": "new.target",
        "rename.me2": "new2.target2",
    }
    self.connector = Connector(mongo_address=self.repl_set.uri,
                               doc_managers=(self.mongo_doc,),
                               namespace_options=namespace_options,
                               **connector_opts)
    self.connector.start()
    assert_soon(lambda: bool(self.connector.shard_set))
    # The target must be empty before the test body runs.
    assert_soon(lambda: not any(True for _ in self._search()))
def setUp(self):
    """Start a new Connector for each test."""
    super(TestElastic, self).setUp()
    try:
        os.unlink("config.txt")
    except OSError:
        pass
    open("config.txt", "w").close()
    self.connector = Connector(
        address='%s:%s' % (mongo_host, self.primary_p),
        oplog_checkpoint='config.txt',
        target_url=elastic_pair,
        ns_set=['test.test'],
        u_key='_id',
        auth_key=None,
        doc_manager='mongo_connector/doc_managers/elastic_doc_manager.py',
        auto_commit_interval=0)
    self.conn.test.test.drop()
    self.connector.start()
    assert_soon(lambda: bool(self.connector.shard_set))
    # Elasticsearch must be empty before the test body runs.
    assert_soon(lambda: self._count() == 0)
def test_write_oplog_progress(self):
    """Test write_oplog_progress under several circumstances."""
    # FIX: create the checkpoint file with the stdlib instead of
    # shelling out via os.system('touch ...'), which is unportable and
    # ignores errors.
    open(TEMP_CONFIG, 'w').close()
    config_file_path = TEMP_CONFIG
    conn = Connector(address=MAIN_ADDRESS,
                     oplog_checkpoint=config_file_path,
                     target_url=None,
                     ns_set=['test.test'],
                     u_key='_id',
                     auth_key=None)
    # test that None is returned if there is no config file specified.
    self.assertEqual(conn.write_oplog_progress(), None)
    conn.oplog_progress.get_dict()[1] = Timestamp(12, 34)
    # pretend to insert a thread/timestamp pair
    conn.write_oplog_progress()
    # FIX: close the file deterministically instead of leaking the
    # handle from json.load(open(...)).
    with open(config_file_path, 'r') as config_file:
        data = json.load(config_file)
    self.assertEqual(1, int(data[0]))
    self.assertEqual(long_to_bson_ts(int(data[1])), Timestamp(12, 34))
    # ensure the temp file was deleted
    self.assertFalse(os.path.exists(config_file_path + '~'))
    # ensure that updates work properly
    conn.oplog_progress.get_dict()[1] = Timestamp(44, 22)
    conn.write_oplog_progress()
    with open(config_file_path, 'r') as config_file:
        data = json.load(config_file)
    self.assertEqual(1, int(data[0]))
    self.assertEqual(long_to_bson_ts(int(data[1])), Timestamp(44, 22))
    # FIX: remove the file with os.unlink instead of os.system('rm ...').
    os.unlink(config_file_path)