def setUp(self):
    """Prepare a fresh replica set, client connections, and an OplogThread."""
    # Start from an empty oplog progress file.
    try:
        os.unlink("oplog.timestamp")
    except OSError:
        pass
    open("oplog.timestamp", "w").close()

    # Bring up the replica set and open the three client connections.
    self.repl_set = ReplicaSet().start()
    # Connection to the replica set as a whole.
    self.main_conn = self.repl_set.client()
    # Connection to the primary specifically.
    self.primary_conn = self.repl_set.primary.client()
    # Connection to the secondary specifically.
    self.secondary_conn = self.repl_set.secondary.client(
        read_preference=ReadPreference.SECONDARY_PREFERRED)

    # Remove any leftover test data.
    self.main_conn.drop_database("test")

    # Build the oplog-tailing thread over the test.mc namespace.
    manager = DocManager()
    progress = LockingDict()
    mapping = DestMapping(["test.mc"], [], {})
    self.opman = OplogThread(
        primary_client=self.main_conn,
        doc_managers=(manager,),
        oplog_progress_dict=progress,
        dest_mapping_stru=mapping,
        ns_set=["test.mc"],
    )
def test_skipped_oplog_entry_updates_checkpoint(self):
    """An oplog entry outside the namespace set still advances the checkpoint."""
    repl_set = ReplicaSetSingle().start()
    conn = repl_set.client()
    opman = OplogThread(
        primary_client=conn,
        doc_managers=(DocManager(),),
        oplog_progress_dict=LockingDict(),
        dest_mapping_stru=DestMapping(["test.test"], [], {}),
        ns_set=set(["test.test"]),
    )
    opman.start()

    def checkpoint_caught_up():
        # Compare against whatever last_ts currently refers to.
        return last_ts == opman.checkpoint

    # A document in an included namespace is replicated, and the
    # checkpoint must catch up to the newest oplog timestamp.
    conn["test"]["test"].insert_one({"test": 1})
    last_ts = opman.get_last_oplog_timestamp()
    assert_soon(checkpoint_caught_up,
                "OplogThread never updated checkpoint to non-skipped "
                "entry.")
    self.assertEqual(len(opman.doc_managers[0]._search()), 1)

    # Make sure that the oplog thread updates its checkpoint on every
    # oplog entry, including ones for collections it does not replicate.
    conn["test"]["ignored"].insert_one({"test": 1})
    last_ts = opman.get_last_oplog_timestamp()
    assert_soon(checkpoint_caught_up,
                "OplogThread never updated checkpoint to skipped entry.")

    opman.join()
    conn.close()
    repl_set.stop()
def setUp(self):
    """Start a single-node replica set and build a default OplogThread."""
    self.repl_set = ReplicaSetSingle().start()
    self.primary_conn = self.repl_set.client()
    # Handle on the raw oplog collection for direct inspection.
    self.oplog_coll = self.primary_conn["local"]["oplog.rs"]
    # No includes, no excludes, no renames.
    self.dest_mapping_stru = DestMapping([], [], {})
    self.opman = OplogThread(
        primary_client=self.primary_conn,
        doc_managers=(DocManager(),),
        oplog_progress_dict=LockingDict(),
        dest_mapping_stru=self.dest_mapping_stru,
    )
def test_dump_collection(self): """Test the dump_collection method Cases: 1. empty oplog 2. non-empty oplog 3. non-empty oplog, specified a namespace-set, none of the oplog entries are for collections in the namespace-set """ # Test with empty oplog self.opman.oplog = self.primary_conn["test"]["emptycollection"] last_ts = self.opman.dump_collection() self.assertEqual(last_ts, None) # Test with non-empty oplog self.opman.oplog = self.primary_conn["local"]["oplog.rs"] for i in range(1000): self.primary_conn["test"]["test"].insert_one({ "i": i + 500 }) last_ts = self.opman.get_last_oplog_timestamp() self.assertEqual(last_ts, self.opman.dump_collection()) self.assertEqual(len(self.opman.doc_managers[0]._search()), 1000) # Case 3 # 1MB oplog so that we can rollover quickly repl_set = ReplicaSetSingle(oplogSize=1).start() conn = repl_set.client() dest_mapping_stru = DestMapping(["test.test"], [], {}) opman = OplogThread( primary_client=conn, doc_managers=(DocManager(),), oplog_progress_dict=LockingDict(), dest_mapping_stru=dest_mapping_stru, ns_set=set(["test.test"]) ) # Insert a document into a ns_set collection conn["test"]["test"].insert_one({"test": 1}) # Cause the oplog to rollover on a non-ns_set collection while conn["local"]["oplog.rs"].find_one({"ns": "test.test"}): conn["test"]["ignored"].insert_many( [{"test": "1" * 1024} for _ in range(1024)]) last_ts = opman.get_last_oplog_timestamp() self.assertEqual(last_ts, opman.dump_collection()) self.assertEqual(len(opman.doc_managers[0]._search()), 1) conn.close() repl_set.stop()
def test_commands(self):
    """Commands (create/rename/drop/dropDatabase) honor namespace mapping."""
    # Also test with namespace mapping.
    # Note that mongo-connector does not currently support commands after
    # renaming a database.
    dest_mapping_stru = DestMapping(
        ['test.test', 'test.test2', 'test.drop'], [],
        {
            'test.test': 'test.othertest',
            'test.drop': 'dropped.collection'
        })
    self.choosy_docman.command_helper = CommandHelper(dest_mapping_stru)
    try:
        # 'create test' must create the *mapped* target collection.
        self.choosy_docman.handle_command({'create': 'test'}, *TESTARGS)
        self.assertIn('othertest',
                      self.mongo_conn['test'].collection_names())
        # renameCollection maps the source namespace; the target keeps
        # its unmapped name ('test.test2' has no rename entry).
        self.choosy_docman.handle_command(
            {
                'renameCollection': 'test.test',
                'to': 'test.test2'
            }, 'admin.$cmd', 1)
        self.assertNotIn('othertest',
                         self.mongo_conn['test'].collection_names())
        self.assertIn('test2', self.mongo_conn['test'].collection_names())
        self.choosy_docman.handle_command({'drop': 'test2'},
                                          'test.$cmd', 1)
        self.assertNotIn('test2',
                         self.mongo_conn['test'].collection_names())
        # WiredTiger drops the database when the last collection is
        # dropped.
        if 'test' not in self.mongo_conn.database_names():
            self.choosy_docman.handle_command({'create': 'test'},
                                              *TESTARGS)
        self.assertIn('test', self.mongo_conn.database_names())
        self.choosy_docman.handle_command({'dropDatabase': 1},
                                          'test.$cmd', 1)
        self.assertNotIn('test', self.mongo_conn.database_names())
        # Briefly test mapped database name with dropDatabase command.
        self.mongo_conn.dropped.collection.insert_one({'a': 1})
        self.assertIn('dropped', self.mongo_conn.database_names())
        self.choosy_docman.handle_command({'dropDatabase': 1},
                                          'test.$cmd', 1)
        self.assertNotIn('dropped', self.mongo_conn.database_names())
    finally:
        # Always clean up the test database, even on assertion failure.
        self.mongo_conn.drop_database('test')
def initOplogThread(self, namespace_set=None, ex_namespace_set=None,
                    dest_mapping=None):
    """Create and start an OplogThread with the given namespace config.

    Fix: the original used mutable default arguments ([] and {}), which
    are created once at definition time and shared across calls -- any
    in-place mutation would leak between tests. Use None sentinels and
    build fresh containers per call; callers passing explicit values are
    unaffected.
    """
    if namespace_set is None:
        namespace_set = []
    if ex_namespace_set is None:
        ex_namespace_set = []
    if dest_mapping is None:
        dest_mapping = {}
    self.docman = CommandLoggerDocManager()
    # Replace the origin dest_mapping
    self.dest_mapping_stru = DestMapping(namespace_set,
                                         ex_namespace_set,
                                         dest_mapping)
    self.docman.command_helper = CommandHelper(self.dest_mapping_stru)
    self.opman = OplogThread(primary_client=self.primary_conn,
                             doc_managers=(self.docman, ),
                             oplog_progress_dict=self.oplog_progress,
                             dest_mapping_stru=self.dest_mapping_stru,
                             ns_set=namespace_set,
                             ex_ns_set=ex_namespace_set,
                             collection_dump=False)
    self.opman.start()
def test_command_helper(self): mapping = {'a.x': 'b.x', 'a.y': 'c.y'} # Replace the origin dest_mapping dest_mapping_stru = DestMapping(list(mapping) + ['a.z'], [], mapping) helper = CommandHelper(dest_mapping_stru) self.assertEqual(set(helper.map_db('a')), set(['a', 'b', 'c'])) self.assertEqual(helper.map_db('d'), []) self.assertEqual(helper.map_namespace('a.x'), 'b.x') self.assertEqual(helper.map_namespace('a.z'), 'a.z') self.assertEqual(helper.map_namespace('d.x'), None) self.assertEqual(helper.map_collection('a', 'x'), ('b', 'x')) self.assertEqual(helper.map_collection('a', 'z'), ('a', 'z')) self.assertEqual(helper.map_collection('d', 'x'), (None, None))
def reset_opman(self, include_ns=None, exclude_ns=None, dest_mapping=None):
    """Rebuild self.opman with the given namespace configuration."""
    # Normalize the None sentinels to fresh empty containers.
    include_ns = [] if include_ns is None else include_ns
    exclude_ns = [] if exclude_ns is None else exclude_ns
    dest_mapping = {} if dest_mapping is None else dest_mapping

    # include_ns must not exist together with exclude_ns
    # dest_mapping must exist together with include_ns
    # those checks have been tested in test_config.py so we skip that here.
    self.dest_mapping_stru = DestMapping(include_ns, exclude_ns,
                                         dest_mapping)
    self.opman = OplogThread(
        primary_client=self.primary_conn,
        doc_managers=(DocManager(),),
        oplog_progress_dict=LockingDict(),
        dest_mapping_stru=self.dest_mapping_stru,
        ns_set=include_ns,
        ex_ns_set=exclude_ns,
    )
def setUp(self):
    """Create an OplogThread over an empty destination mapping."""
    # No includes, no excludes, no renames.
    self.dest_mapping_stru = DestMapping([], [], {})
    self.opman = OplogThread(
        primary_client=self.primary_conn,
        doc_managers=(DocManager(),),
        oplog_progress_dict=LockingDict(),
        dest_mapping_stru=self.dest_mapping_stru,
    )
def __init__(self, mongo_address, doc_managers=None, **kwargs):
    """Set up connector state, doc managers, and the progress log file."""
    super(Connector, self).__init__()

    # can_run is set to false when we join the thread
    self.can_run = True
    # The signal that caused the connector to stop or None
    self.signal = None
    # main address - either mongos for sharded setups or a primary otherwise
    self.address = mongo_address
    # connection to the main address
    self.main_conn = None

    # List of DocManager instances; fall back to the simulator when the
    # caller supplies none.
    if doc_managers:
        self.doc_managers = doc_managers
    else:
        LOG.warning('No doc managers specified, using simulator.')
        self.doc_managers = (simulator.DocManager(),)

    # Authentication credentials.
    self.auth_key = kwargs.pop('auth_key', None)
    self.auth_username = kwargs.pop('auth_username', None)

    # The name of the file that stores the progress of the OplogThreads
    self.oplog_checkpoint = kwargs.pop('oplog_checkpoint',
                                       'oplog.timestamp')

    # The set of OplogThreads created
    self.shard_set = {}
    # Dict of OplogThread/timestamp pairs to record progress
    self.oplog_progress = LockingDict()
    # Timezone awareness (left in kwargs on purpose -- read, not popped).
    self.tz_aware = kwargs.get('tz_aware', False)

    # SSL keyword arguments to MongoClient: forward only options that
    # were actually provided (truthy).
    self.ssl_kwargs = {}
    for ssl_opt in ('ssl_certfile', 'ssl_ca_certs',
                    'ssl_keyfile', 'ssl_cert_reqs'):
        ssl_val = kwargs.pop(ssl_opt, None)
        if ssl_val:
            self.ssl_kwargs[ssl_opt] = ssl_val

    # Save the rest of kwargs.
    self.kwargs = kwargs

    # Replace the origin dest_mapping
    self.dest_mapping = DestMapping(kwargs.get('ns_set', []),
                                    kwargs.get('ex_ns_set', []),
                                    kwargs.get('dest_mapping', {}))

    # Initialize and set the command helper on every doc manager.
    command_helper = CommandHelper(self.dest_mapping)
    for dm in self.doc_managers:
        dm.command_helper = command_helper

    # Ensure the progress log exists and is accessible; otherwise exit.
    if self.oplog_checkpoint is not None:
        if not os.path.exists(self.oplog_checkpoint):
            info_str = ("MongoConnector: Can't find %s, "
                        "attempting to create an empty progress log"
                        % self.oplog_checkpoint)
            LOG.warning(info_str)
            try:
                # Create oplog progress file
                open(self.oplog_checkpoint, "w").close()
            except IOError as e:
                LOG.critical("MongoConnector: Could not "
                             "create a progress log: %s" % str(e))
                sys.exit(2)
        else:
            if (not os.access(self.oplog_checkpoint, os.W_OK)
                    and not os.access(self.oplog_checkpoint, os.R_OK)):
                LOG.critical("Invalid permissions on %s! Exiting" %
                             (self.oplog_checkpoint))
                sys.exit(2)
def set_up_sharded_cluster(self, sharded_cluster_type):
    """
    Initialize the cluster:

    Clean out the databases used by the tests
    Make connections to mongos, mongods
    Create and shard test collections
    Create OplogThreads
    """
    self.cluster = sharded_cluster_type().start()
    # Connection to mongos
    self.mongos_conn = self.cluster.client()
    # Connections to the shards
    self.shard1_conn = self.cluster.shards[0].client()
    self.shard2_conn = self.cluster.shards[1].client()

    # Wipe any test data
    self.mongos_conn["test"]["mcsharded"].drop()

    # Disable the balancer before creating the collection, so it cannot
    # move chunks out from under the manual placement below.
    self.mongos_conn.config.settings.update_one(
        {"_id": "balancer"},
        {"$set": {"stopped": True}},
        upsert=True
    )

    # Create and shard the collection test.mcsharded on the "i" field
    self.mongos_conn["test"]["mcsharded"].create_index("i")
    self.mongos_conn.admin.command("enableSharding", "test")
    self.mongos_conn.admin.command("shardCollection", "test.mcsharded",
                                   key={"i": 1})

    # Pre-split the collection so that:
    # i < 1000 lives on shard1
    # i >= 1000 lives on shard2
    self.mongos_conn.admin.command(bson.SON([
        ("split", "test.mcsharded"),
        ("middle", {"i": 1000})
    ]))

    # Move chunks to their proper places. moveChunk raises
    # OperationFailure when it cannot move (presumably the chunk is
    # already on the target shard -- TODO confirm); this is best-effort.
    try:
        self.mongos_conn["admin"].command(
            "moveChunk", "test.mcsharded", find={"i": 1}, to='demo-set-0')
    except pymongo.errors.OperationFailure:
        pass
    try:
        self.mongos_conn["admin"].command(
            "moveChunk", "test.mcsharded", find={"i": 1000},
            to='demo-set-1')
    except pymongo.errors.OperationFailure:
        pass

    # Make sure chunks are distributed correctly: one marker doc per
    # expected chunk, then poll until each shard sees its own marker.
    self.mongos_conn["test"]["mcsharded"].insert_one({"i": 1})
    self.mongos_conn["test"]["mcsharded"].insert_one({"i": 1000})

    def chunks_moved():
        # True only when each shard holds exactly the expected marker.
        doc1 = self.shard1_conn.test.mcsharded.find_one()
        doc2 = self.shard2_conn.test.mcsharded.find_one()
        if None in (doc1, doc2):
            return False
        return doc1['i'] == 1 and doc2['i'] == 1000

    assert_soon(chunks_moved, max_tries=120,
                message='chunks not moved? doc1=%r, doc2=%r' % (
                    self.shard1_conn.test.mcsharded.find_one(),
                    self.shard2_conn.test.mcsharded.find_one()))
    # Remove the marker documents before the real test data goes in.
    self.mongos_conn.test.mcsharded.delete_many({})

    # create a new oplog progress file
    try:
        os.unlink("oplog.timestamp")
    except OSError:
        pass
    open("oplog.timestamp", "w").close()

    # Oplog threads (oplog manager) for each shard; both share one
    # DocManager and one progress dict.
    doc_manager = DocManager()
    oplog_progress = LockingDict()
    dest_mapping_stru = DestMapping(["test.mcsharded",
                                     "test.mcunsharded"], [], {})
    self.opman1 = OplogThread(
        primary_client=self.shard1_conn,
        doc_managers=(doc_manager,),
        oplog_progress_dict=oplog_progress,
        dest_mapping_stru=dest_mapping_stru,
        ns_set=["test.mcsharded", "test.mcunsharded"],
        mongos_client=self.mongos_conn
    )
    self.opman2 = OplogThread(
        primary_client=self.shard2_conn,
        doc_managers=(doc_manager,),
        oplog_progress_dict=oplog_progress,
        dest_mapping_stru=dest_mapping_stru,
        ns_set=["test.mcsharded", "test.mcunsharded"],
        mongos_client=self.mongos_conn
    )
def test_namespace_mapping(self):
    """Test mapping of namespaces

    Cases:
    upsert/delete/update of documents:
    1. in namespace set, mapping provided
    2. outside of namespace set, mapping provided
    """
    source_ns = ["test.test1", "test.test2"]
    phony_ns = ["test.phony1", "test.phony2"]
    dest_mapping = {"test.test1": "test.test1_dest",
                    "test.test2": "test.test2_dest"}
    dest_mapping_stru = DestMapping(source_ns, [], dest_mapping)
    self.opman.dest_mapping = dest_mapping
    self.opman.dest_mapping_stru = dest_mapping_stru
    self.opman.namespace_set = source_ns
    docman = self.opman.doc_managers[0]
    # start replicating
    self.opman.start()

    base_doc = {"_id": 1, "name": "superman"}

    # doc in namespace set
    for ns in source_ns:
        db, coll = ns.split(".", 1)

        # test insert: the replicated document must carry the *renamed*
        # namespace, and none may carry the original one.
        self.primary_conn[db][coll].insert_one(base_doc)
        assert_soon(lambda: len(docman._search()) == 1)
        self.assertEqual(docman._search()[0]["ns"], dest_mapping[ns])
        bad = [d for d in docman._search() if d["ns"] == ns]
        self.assertEqual(len(bad), 0)

        # test update: wait until the new field is visible, then verify
        # the namespace is still the mapped one.
        self.primary_conn[db][coll].update_one(
            {"_id": 1},
            {"$set": {"weakness": "kryptonite"}}
        )

        def update_complete():
            docs = docman._search()
            for d in docs:
                if d.get("weakness") == "kryptonite":
                    return True
            return False
        assert_soon(update_complete)
        self.assertEqual(docman._search()[0]["ns"], dest_mapping[ns])
        bad = [d for d in docman._search() if d["ns"] == ns]
        self.assertEqual(len(bad), 0)

        # test delete: the mapped document must disappear entirely.
        self.primary_conn[db][coll].delete_one({"_id": 1})
        assert_soon(lambda: len(docman._search()) == 0)
        bad = [d for d in docman._search()
               if d["ns"] == dest_mapping[ns]]
        self.assertEqual(len(bad), 0)

        # cleanup
        self.primary_conn[db][coll].delete_many({})
        self.opman.doc_managers[0]._delete()

    # doc not in namespace set: nothing should ever reach the DocManager.
    for ns in phony_ns:
        db, coll = ns.split(".", 1)

        # test insert
        self.primary_conn[db][coll].insert_one(base_doc)
        time.sleep(1)
        self.assertEqual(len(docman._search()), 0)
        # test update
        self.primary_conn[db][coll].update_one(
            {"_id": 1},
            {"$set": {"weakness": "kryptonite"}}
        )
        time.sleep(1)
        self.assertEqual(len(docman._search()), 0)
def setup_mapping(self, include, exclude, mapping):
    """Build a DestMapping from include/exclude namespace lists and a
    source-to-destination rename mapping, and store it on self.mapping."""
    self.mapping = DestMapping(include, exclude, mapping)
class TestDestMapping(unittest.TestCase):
    """Tests for DestMapping's include/exclude/rename namespace logic."""

    def setup_mapping(self, include, exclude, mapping):
        # Helper: rebuild self.mapping from the given configuration.
        self.mapping = DestMapping(include, exclude, mapping)

    def test_default(self):
        # By default, all namespaces are kept without renaming
        self.setup_mapping([], [], {})
        self.assertEqual(self.mapping.get("db1.col1", "db1.col1"),
                         "db1.col1")
        self.assertFalse(self.mapping.get_key("db1.col1"))
        self.assertListEqual(self.mapping.map_db("db1"), ["db1"])
        self.assertEqual(self.mapping.map_namespace("db1.col1"),
                         "db1.col1")

    def test_only_include(self):
        # Test that only provides include namespaces
        # Test plain case
        self.setup_mapping(["db1.col1", "db1.col2", "db1.col3"], [], {})
        self.assertEqual(self.mapping.get("db1.col1"), "db1.col1")
        self.assertEqual(self.mapping.get("db1.col2"), "db1.col2")
        self.assertEqual(self.mapping.get("db1.col3"), "db1.col3")
        self.assertEqual(self.mapping.get_key("db1.col1"), "db1.col1")
        self.assertListEqual(self.mapping.map_db("db1"), ["db1"])
        self.assertEqual(self.mapping.map_namespace("db1.col1"),
                         "db1.col1")
        # A namespace outside the include list maps to nothing.
        self.assertIsNone(self.mapping.map_namespace("db1.col4"))
        # Test wildcard case. Before the first lookup a wildcard match is
        # not in the plain map (get_key is falsy); after get() it is.
        self.setup_mapping(["db1.*"], [], {})
        self.assertFalse(self.mapping.get_key("db1.col1"))
        self.assertEqual(self.mapping.get("db1.col1"), "db1.col1")
        self.assertEqual(self.mapping.get_key("db1.col1"), "db1.col1")
        self.assertListEqual(self.mapping.map_db("db1"), ["db1"])
        self.assertEqual(self.mapping.map_namespace("db1.col1"),
                         "db1.col1")
        self.assertIsNone(self.mapping.map_namespace("db2.col4"))

    def test_only_exclude(self):
        # Test that only provides exclude namespaces
        # Test plain case
        self.setup_mapping([], ["db1.col4"], {})
        self.assertEqual(self.mapping.get("db1.col1", "db1.col1"),
                         "db1.col1")
        self.assertEqual(self.mapping.map_namespace("db1.col1"),
                         "db1.col1")
        self.assertIsNone(self.mapping.map_namespace("db1.col4"))
        # Test wildcard case
        self.setup_mapping([], ["db2.*"], {})
        self.assertEqual(self.mapping.get("db1.col1", "db1.col1"),
                         "db1.col1")
        self.assertEqual(self.mapping.map_namespace("db1.col1"),
                         "db1.col1")
        self.assertIsNone(self.mapping.map_namespace("db2.col"))

    def test_mapping(self):
        # multiple dbs cannot be mapped to the same db
        mapping = {"db1.col1": "newdb.newcol", "db2.col1": "newdb.newcol"}
        self.assertRaises(errors.InvalidConfiguration,
                          self.setup_mapping,
                          ["db1.col1", "db2.col1"], [], mapping)
        # Test mapping
        mapping = {
            "db1.*": "newdb1_*.newcol",
            "db2.a": "newdb2_a.x",
            "db2.b": "b_newdb2.x"
        }
        self.setup_mapping(["db1.*", "db2.a", "db2.b"], [], mapping)
        # Only the plain (non-wildcard) entries are registered up front.
        self.assertDictEqual(self.mapping.plain, {
            "db2.a": "newdb2_a.x",
            "db2.b": "b_newdb2.x"
        })
        self.assertDictEqual(self.mapping.plain_db,
                             {"db2": set(["newdb2_a", "b_newdb2"])})
        self.assertDictEqual(self.mapping.reverse_plain, {
            "newdb2_a.x": "db2.a",
            "b_newdb2.x": "db2.b"
        })
        self.assertDictEqual(self.mapping.wildcard,
                             {"db1.*": "newdb1_*.newcol"})
        self.assertEqual(self.mapping.get_key("newdb2_a.x"), "db2.a")
        self.assertSetEqual(set(self.mapping.map_db("db2")),
                            set(["newdb2_a", "b_newdb2"]))
        # when we get matched maps, plain should contain those ones afterwards
        self.assertEqual(self.mapping.get("db1.col1"),
                         "newdb1_col1.newcol")
        self.assertEqual(self.mapping.map_namespace("db1.col2"),
                         "newdb1_col2.newcol")
        self.assertDictEqual(
            self.mapping.plain, {
                "db2.a": "newdb2_a.x",
                "db2.b": "b_newdb2.x",
                "db1.col1": "newdb1_col1.newcol",
                "db1.col2": "newdb1_col2.newcol"
            })
        self.assertDictEqual(
            self.mapping.plain_db, {
                "db2": set(["newdb2_a", "b_newdb2"]),
                "db1": set(["newdb1_col1", "newdb1_col2"])
            })
        self.assertDictEqual(
            self.mapping.reverse_plain, {
                "newdb2_a.x": "db2.a",
                "b_newdb2.x": "db2.b",
                "newdb1_col1.newcol": "db1.col1",
                "newdb1_col2.newcol": "db1.col2"
            })
class TestDestMapping(unittest.TestCase):
    """Tests for DestMapping's include/exclude/rename namespace logic.

    NOTE(review): this class definition duplicates an earlier
    TestDestMapping in this source; if both live in the same module the
    later definition shadows the former -- confirm whether these belong
    to separate files.
    """

    def setup_mapping(self, include, exclude, mapping):
        # Helper: rebuild self.mapping from the given configuration.
        self.mapping = DestMapping(include, exclude, mapping)

    def test_default(self):
        # By default, all namespaces are kept without renaming
        self.setup_mapping([], [], {})
        self.assertEqual(self.mapping.get("db1.col1", "db1.col1"),
                         "db1.col1")
        self.assertFalse(self.mapping.get_key("db1.col1"))
        self.assertListEqual(self.mapping.map_db("db1"), ["db1"])
        self.assertEqual(self.mapping.map_namespace("db1.col1"),
                         "db1.col1")

    def test_only_include(self):
        # Test that only provides include namespaces
        # Test plain case
        self.setup_mapping(["db1.col1", "db1.col2", "db1.col3"], [], {})
        self.assertEqual(self.mapping.get("db1.col1"), "db1.col1")
        self.assertEqual(self.mapping.get("db1.col2"), "db1.col2")
        self.assertEqual(self.mapping.get("db1.col3"), "db1.col3")
        self.assertEqual(self.mapping.get_key("db1.col1"), "db1.col1")
        self.assertListEqual(self.mapping.map_db("db1"), ["db1"])
        self.assertEqual(self.mapping.map_namespace("db1.col1"),
                         "db1.col1")
        # A namespace outside the include list maps to nothing.
        self.assertIsNone(self.mapping.map_namespace("db1.col4"))
        # Test wildcard case. Before the first lookup a wildcard match is
        # not in the plain map (get_key is falsy); after get() it is.
        self.setup_mapping(["db1.*"], [], {})
        self.assertFalse(self.mapping.get_key("db1.col1"))
        self.assertEqual(self.mapping.get("db1.col1"), "db1.col1")
        self.assertEqual(self.mapping.get_key("db1.col1"), "db1.col1")
        self.assertListEqual(self.mapping.map_db("db1"), ["db1"])
        self.assertEqual(self.mapping.map_namespace("db1.col1"),
                         "db1.col1")
        self.assertIsNone(self.mapping.map_namespace("db2.col4"))

    def test_only_exclude(self):
        # Test that only provides exclude namespaces
        # Test plain case
        self.setup_mapping([], ["db1.col4"], {})
        self.assertEqual(self.mapping.get("db1.col1", "db1.col1"),
                         "db1.col1")
        self.assertEqual(self.mapping.map_namespace("db1.col1"),
                         "db1.col1")
        self.assertIsNone(self.mapping.map_namespace("db1.col4"))
        # Test wildcard case
        self.setup_mapping([], ["db2.*"], {})
        self.assertEqual(self.mapping.get("db1.col1", "db1.col1"),
                         "db1.col1")
        self.assertEqual(self.mapping.map_namespace("db1.col1"),
                         "db1.col1")
        self.assertIsNone(self.mapping.map_namespace("db2.col"))

    def test_mapping(self):
        # multiple dbs cannot be mapped to the same db
        mapping = {
            "db1.col1": "newdb.newcol",
            "db2.col1": "newdb.newcol"
        }
        self.assertRaises(errors.InvalidConfiguration,
                          self.setup_mapping,
                          ["db1.col1", "db2.col1"], [], mapping)
        # Test mapping
        mapping = {
            "db1.*": "newdb1_*.newcol",
            "db2.a": "newdb2_a.x",
            "db2.b": "b_newdb2.x"
        }
        self.setup_mapping(["db1.*", "db2.a", "db2.b"], [], mapping)
        # Only the plain (non-wildcard) entries are registered up front.
        self.assertDictEqual(self.mapping.plain,
                             {"db2.a": "newdb2_a.x",
                              "db2.b": "b_newdb2.x"})
        self.assertDictEqual(self.mapping.plain_db,
                             {"db2": set(["newdb2_a", "b_newdb2"])})
        self.assertDictEqual(self.mapping.reverse_plain,
                             {"newdb2_a.x": "db2.a",
                              "b_newdb2.x": "db2.b"})
        self.assertDictEqual(self.mapping.wildcard,
                             {"db1.*": "newdb1_*.newcol"})
        self.assertEqual(self.mapping.get_key("newdb2_a.x"), "db2.a")
        self.assertSetEqual(set(self.mapping.map_db("db2")),
                            set(["newdb2_a", "b_newdb2"]))
        # when we get matched maps, plain should contain those ones afterwards
        self.assertEqual(self.mapping.get("db1.col1"),
                         "newdb1_col1.newcol")
        self.assertEqual(self.mapping.map_namespace("db1.col2"),
                         "newdb1_col2.newcol")
        self.assertDictEqual(self.mapping.plain,
                             {"db2.a": "newdb2_a.x",
                              "db2.b": "b_newdb2.x",
                              "db1.col1": "newdb1_col1.newcol",
                              "db1.col2": "newdb1_col2.newcol"})
        self.assertDictEqual(self.mapping.plain_db,
                             {"db2": set(["newdb2_a", "b_newdb2"]),
                              "db1": set(["newdb1_col1",
                                          "newdb1_col2"])})
        self.assertDictEqual(self.mapping.reverse_plain,
                             {"newdb2_a.x": "db2.a",
                              "b_newdb2.x": "db2.b",
                              "newdb1_col1.newcol": "db1.col1",
                              "newdb1_col2.newcol": "db1.col2"})